i915_gem.c 163.6 KB
Newer Older
1
/*
2
 * Copyright © 2008-2015 Intel Corporation
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

28
#include <drm/drmP.h>
29
#include <drm/drm_vma_manager.h>
30
#include <drm/i915_drm.h>
31
#include "i915_drv.h"
32
#include "i915_gem_clflush.h"
33
#include "i915_vgpu.h"
C
Chris Wilson 已提交
34
#include "i915_trace.h"
35
#include "intel_drv.h"
36
#include "intel_frontbuffer.h"
37
#include "intel_mocs.h"
38
#include "intel_workarounds.h"
M
Matthew Auld 已提交
39
#include "i915_gemfs.h"
40
#include <linux/dma-fence-array.h>
41
#include <linux/kthread.h>
42
#include <linux/reservation.h>
43
#include <linux/shmem_fs.h>
44
#include <linux/slab.h>
45
#include <linux/stop_machine.h>
46
#include <linux/swap.h>
J
Jesse Barnes 已提交
47
#include <linux/pci.h>
48
#include <linux/dma-buf.h>
49

50
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
51

52 53
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
54
	if (obj->cache_dirty)
55 56
		return false;

57
	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
58 59
		return true;

60
	return obj->pin_global; /* currently in use by HW, keep flushed */
61 62
}

63
static int
64
insert_mappable_node(struct i915_ggtt *ggtt,
65 66 67
                     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
68
	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
69 70 71
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
72 73 74 75 76 77 78 79
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

80 81
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
82
				  u64 size)
83
{
84
	spin_lock(&dev_priv->mm.object_stat_lock);
85 86
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
87
	spin_unlock(&dev_priv->mm.object_stat_lock);
88 89 90
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
91
				     u64 size)
92
{
93
	spin_lock(&dev_priv->mm.object_stat_lock);
94 95
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
96
	spin_unlock(&dev_priv->mm.object_stat_lock);
97 98
}

99
static int
100
i915_gem_wait_for_error(struct i915_gpu_error *error)
101 102 103
{
	int ret;

104 105
	might_sleep();

106 107 108 109 110
	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
111
	ret = wait_event_interruptible_timeout(error->reset_queue,
112
					       !i915_reset_backoff(error),
113
					       I915_RESET_TIMEOUT);
114 115 116 117
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
118
		return ret;
119 120
	} else {
		return 0;
121
	}
122 123
}

124
int i915_mutex_lock_interruptible(struct drm_device *dev)
125
{
126
	struct drm_i915_private *dev_priv = to_i915(dev);
127 128
	int ret;

129
	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
130 131 132 133 134 135 136 137 138
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}
139

140 141
static u32 __i915_gem_park(struct drm_i915_private *i915)
{
142 143
	GEM_TRACE("\n");

144 145
	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(i915->gt.active_requests);
146
	GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166

	if (!i915->gt.awake)
		return I915_EPOCH_INVALID;

	GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);

	/*
	 * Be paranoid and flush a concurrent interrupt to make sure
	 * we don't reactivate any irq tasklets after parking.
	 *
	 * FIXME: Note that even though we have waited for execlists to be idle,
	 * there may still be an in-flight interrupt even though the CSB
	 * is now empty. synchronize_irq() makes sure that a residual interrupt
	 * is completed before we continue, but it doesn't prevent the HW from
	 * raising a spurious interrupt later. To complete the shield we should
	 * coordinate disabling the CS irq with flushing the interrupts.
	 */
	synchronize_irq(i915->drm.irq);

	intel_engines_park(i915);
167
	i915_timelines_park(i915);
168 169

	i915_pmu_gt_parked(i915);
170
	i915_vma_parked(i915);
171 172 173 174 175 176 177 178 179 180 181 182 183 184 185

	i915->gt.awake = false;

	if (INTEL_GEN(i915) >= 6)
		gen6_rps_idle(i915);

	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ);

	intel_runtime_pm_put(i915);

	return i915->gt.epoch;
}

void i915_gem_park(struct drm_i915_private *i915)
{
186 187
	GEM_TRACE("\n");

188 189 190 191 192 193 194 195 196 197 198 199
	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(i915->gt.active_requests);

	if (!i915->gt.awake)
		return;

	/* Defer the actual call to __i915_gem_park() to prevent ping-pongs */
	mod_delayed_work(i915->wq, &i915->gt.idle_work, msecs_to_jiffies(100));
}

void i915_gem_unpark(struct drm_i915_private *i915)
{
200 201
	GEM_TRACE("\n");

202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241
	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(!i915->gt.active_requests);

	if (i915->gt.awake)
		return;

	intel_runtime_pm_get_noresume(i915);

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has negative impact on the performance of the chip with
	 * huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);

	i915->gt.awake = true;
	if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
		i915->gt.epoch = 1;

	intel_enable_gt_powersave(i915);
	i915_update_gfx_val(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_busy(i915);
	i915_pmu_gt_unparked(i915);

	intel_engines_unpark(i915);

	i915_queue_hangcheck(i915);

	queue_delayed_work(i915->wq,
			   &i915->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}

242 243
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
244
			    struct drm_file *file)
245
{
246
	struct drm_i915_private *dev_priv = to_i915(dev);
247
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
248
	struct drm_i915_gem_get_aperture *args = data;
249
	struct i915_vma *vma;
250
	u64 pinned;
251

252
	pinned = ggtt->vm.reserved;
253
	mutex_lock(&dev->struct_mutex);
254
	list_for_each_entry(vma, &ggtt->vm.active_list, vm_link)
255
		if (i915_vma_is_pinned(vma))
256
			pinned += vma->node.size;
257
	list_for_each_entry(vma, &ggtt->vm.inactive_list, vm_link)
258
		if (i915_vma_is_pinned(vma))
259
			pinned += vma->node.size;
260
	mutex_unlock(&dev->struct_mutex);
261

262
	args->aper_size = ggtt->vm.total;
263
	args->aper_available_size = args->aper_size - pinned;
264

265 266 267
	return 0;
}

268
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
269
{
270
	struct address_space *mapping = obj->base.filp->f_mapping;
271
	drm_dma_handle_t *phys;
272 273
	struct sg_table *st;
	struct scatterlist *sg;
274
	char *vaddr;
275
	int i;
276
	int err;
277

278
	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
279
		return -EINVAL;
280

281 282 283 284 285
	/* Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
286
			     roundup_pow_of_two(obj->base.size),
287 288
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
289
		return -ENOMEM;
290 291

	vaddr = phys->vaddr;
292 293 294 295 296
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
297
		if (IS_ERR(page)) {
298
			err = PTR_ERR(page);
299 300
			goto err_phys;
		}
301 302 303 304 305 306

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

307
		put_page(page);
308 309 310
		vaddr += PAGE_SIZE;
	}

311
	i915_gem_chipset_flush(to_i915(obj->base.dev));
312 313

	st = kmalloc(sizeof(*st), GFP_KERNEL);
314
	if (!st) {
315
		err = -ENOMEM;
316 317
		goto err_phys;
	}
318 319 320

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
321
		err = -ENOMEM;
322
		goto err_phys;
323 324 325 326 327
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;
328

329
	sg_dma_address(sg) = phys->busaddr;
330 331
	sg_dma_len(sg) = obj->base.size;

332
	obj->phys_handle = phys;
333

334
	__i915_gem_object_set_pages(obj, st, sg->length);
335 336

	return 0;
337 338 339

err_phys:
	drm_pci_free(obj->base.dev, phys);
340 341

	return err;
342 343
}

344 345
static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
346 347
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
348 349 350 351
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

352
static void
353
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
354 355
				struct sg_table *pages,
				bool needs_clflush)
356
{
C
Chris Wilson 已提交
357
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
358

C
Chris Wilson 已提交
359 360
	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;
361

362
	if (needs_clflush &&
363
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
364
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
365
		drm_clflush_sg(pages);
366

367
	__start_cpu_write(obj);
368 369 370 371 372 373
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
374
	__i915_gem_object_release_shmem(obj, pages, false);
375

C
Chris Wilson 已提交
376
	if (obj->mm.dirty) {
377
		struct address_space *mapping = obj->base.filp->f_mapping;
378
		char *vaddr = obj->phys_handle->vaddr;
379 380 381
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
382 383 384 385 386 387 388 389 390 391 392 393 394
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
C
Chris Wilson 已提交
395
			if (obj->mm.madv == I915_MADV_WILLNEED)
396
				mark_page_accessed(page);
397
			put_page(page);
398 399
			vaddr += PAGE_SIZE;
		}
C
Chris Wilson 已提交
400
		obj->mm.dirty = false;
401 402
	}

403 404
	sg_free_table(pages);
	kfree(pages);
405 406

	drm_pci_free(obj->base.dev, obj->phys_handle);
407 408 409 410 411
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
C
Chris Wilson 已提交
412
	i915_gem_object_unpin_pages(obj);
413 414 415 416 417 418 419 420
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

421 422
static const struct drm_i915_gem_object_ops i915_gem_object_ops;

423
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
424 425 426
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
427 428 429
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
430

431 432 433 434
	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
435
	 */
436
	ret = i915_gem_object_set_to_cpu_domain(obj, false);
437 438 439
	if (ret)
		return ret;

440 441 442 443 444 445 446 447 448 449 450 451 452
	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

453 454 455 456
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout,
457
			   struct intel_rps_client *rps_client)
458
{
459
	struct i915_request *rq;
460

461
	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
462

463 464 465 466 467 468 469 470 471
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
472
	if (i915_request_completed(rq))
473 474
		goto out;

475 476
	/*
	 * This client is about to stall waiting for the GPU. In many cases
477 478 479 480 481 482 483 484 485 486 487 488 489 490
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
491
	if (rps_client && !i915_request_started(rq)) {
492
		if (INTEL_GEN(rq->i915) >= 6)
493
			gen6_rps_boost(rq, rps_client);
494 495
	}

496
	timeout = i915_request_wait(rq, flags, timeout);
497 498

out:
499 500
	if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
		i915_request_retire_upto(rq);
501 502 503 504 505 506 507 508

	return timeout;
}

static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout,
509
				 struct intel_rps_client *rps_client)
510
{
511
	unsigned int seq = __read_seqcount_begin(&resv->seq);
512
	struct dma_fence *excl;
513
	bool prune_fences = false;
514 515 516 517

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
518 519
		int ret;

520 521
		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
522 523 524
		if (ret)
			return ret;

525 526 527
		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout,
528
							     rps_client);
529
			if (timeout < 0)
530
				break;
531

532 533 534 535 536 537
			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
538

539 540 541 542 543 544 545 546 547
		/*
		 * If both shared fences and an exclusive fence exist,
		 * then by construction the shared fences must be later
		 * than the exclusive fence. If we successfully wait for
		 * all the shared fences, we know that the exclusive fence
		 * must all be signaled. If all the shared fences are
		 * signaled, we can prune the array and recover the
		 * floating references on the fences/requests.
		 */
548
		prune_fences = count && timeout >= 0;
549 550
	} else {
		excl = reservation_object_get_excl_rcu(resv);
551 552
	}

553
	if (excl && timeout >= 0)
554 555
		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
						     rps_client);
556 557 558

	dma_fence_put(excl);

559 560
	/*
	 * Opportunistically prune the fences iff we know they have *all* been
561 562 563
	 * signaled and that the reservation object has not been changed (i.e.
	 * no new fences have been added).
	 */
564
	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
565 566 567 568 569
		if (reservation_object_trylock(resv)) {
			if (!__read_seqcount_retry(&resv->seq, seq))
				reservation_object_add_excl_fence(resv, NULL);
			reservation_object_unlock(resv);
		}
570 571
	}

572
	return timeout;
573 574
}

575 576
static void __fence_set_priority(struct dma_fence *fence,
				 const struct i915_sched_attr *attr)
577
{
578
	struct i915_request *rq;
579 580
	struct intel_engine_cs *engine;

581
	if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
582 583 584 585 586
		return;

	rq = to_request(fence);
	engine = rq->engine;

587 588
	local_bh_disable();
	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
589
	if (engine->schedule)
590
		engine->schedule(rq, attr);
591
	rcu_read_unlock();
592
	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
593 594
}

595 596
static void fence_set_priority(struct dma_fence *fence,
			       const struct i915_sched_attr *attr)
597 598 599 600 601 602 603
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
604
			__fence_set_priority(array->fences[i], attr);
605
	} else {
606
		__fence_set_priority(fence, attr);
607 608 609 610 611 612
	}
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
613
			      const struct i915_sched_attr *attr)
614 615 616 617 618 619 620 621 622 623 624 625 626 627
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
628
			fence_set_priority(shared[i], attr);
629 630 631 632 633 634 635 636 637
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
638
		fence_set_priority(excl, attr);
639 640 641 642 643
		dma_fence_put(excl);
	}
	return 0;
}

644 645 646 647 648
/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
649
 * @rps_client: client (user process) to charge for any waitboosting
650
 */
651 652 653 654
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout,
655
		     struct intel_rps_client *rps_client)
656
{
657 658 659 660 661 662 663
	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);
664

665 666
	timeout = i915_gem_object_wait_reservation(obj->resv,
						   flags, timeout,
667
						   rps_client);
668
	return timeout < 0 ? timeout : 0;
669 670 671 672 673 674
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

675
	return &fpriv->rps_client;
676 677
}

678 679 680
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
681
		     struct drm_file *file)
682 683
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
684
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
685 686 687 688

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
689
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
690 691
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;
692

693
	drm_clflush_virt_range(vaddr, args->size);
694
	i915_gem_chipset_flush(to_i915(obj->base.dev));
695

696
	intel_fb_obj_flush(obj, ORIGIN_CPU);
697
	return 0;
698 699
}

700
void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
701
{
702
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
703 704 705 706
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
707
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
708
	kmem_cache_free(dev_priv->objects, obj);
709 710
}

711 712
static int
i915_gem_create(struct drm_file *file,
713
		struct drm_i915_private *dev_priv,
714 715
		uint64_t size,
		uint32_t *handle_p)
716
{
717
	struct drm_i915_gem_object *obj;
718 719
	int ret;
	u32 handle;
720

721
	size = roundup(size, PAGE_SIZE);
722 723
	if (size == 0)
		return -EINVAL;
724 725

	/* Allocate the new object */
726
	obj = i915_gem_object_create(dev_priv, size);
727 728
	if (IS_ERR(obj))
		return PTR_ERR(obj);
729

730
	ret = drm_gem_handle_create(file, &obj->base, &handle);
731
	/* drop reference from allocate - handle holds it now */
C
Chris Wilson 已提交
732
	i915_gem_object_put(obj);
733 734
	if (ret)
		return ret;
735

736
	*handle_p = handle;
737 738 739
	return 0;
}

740 741 742 743 744 745
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
746
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
747
	args->size = args->pitch * args->height;
748
	return i915_gem_create(file, to_i915(dev),
749
			       args->size, &args->handle);
750 751
}

752 753 754 755 756 757
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

758 759
/**
 * Creates a new mm object and returns a handle to it.
760 761 762
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
763 764 765 766 767
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
768
	struct drm_i915_private *dev_priv = to_i915(dev);
769
	struct drm_i915_gem_create *args = data;
770

771
	i915_gem_flush_free_objects(dev_priv);
772

773
	return i915_gem_create(file, dev_priv,
774
			       args->size, &args->handle);
775 776
}

777 778 779 780 781 782 783
static inline enum fb_op_origin
fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
{
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}

784
void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
785
{
786 787 788 789 790
	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
791 792 793 794 795 796 797 798 799 800
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
801 802
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
803
	 */
804

805 806
	wmb();

807 808 809 810 811 812 813 814 815 816 817 818 819 820 821
	intel_runtime_pm_get(dev_priv);
	spin_lock_irq(&dev_priv->uncore.lock);

	POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));

	spin_unlock_irq(&dev_priv->uncore.lock);
	intel_runtime_pm_put(dev_priv);
}

static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_vma *vma;

822
	if (!(obj->write_domain & flush_domains))
823 824
		return;

825
	switch (obj->write_domain) {
826
	case I915_GEM_DOMAIN_GTT:
827
		i915_gem_flush_ggtt_writes(dev_priv);
828 829 830

		intel_fb_obj_flush(obj,
				   fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
831

832
		for_each_ggtt_vma(vma, obj) {
833 834 835 836 837
			if (vma->iomap)
				continue;

			i915_vma_unset_ggtt_write(vma);
		}
838 839 840 841 842
		break;

	case I915_GEM_DOMAIN_CPU:
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		break;
843 844 845 846 847

	case I915_GEM_DOMAIN_RENDER:
		if (gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
		break;
848 849
	}

850
	obj->write_domain = 0;
851 852
}

853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

879
static inline int
880 881
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

905 906 907 908 909 910
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
911
				    unsigned int *needs_clflush)
912 913 914
{
	int ret;

915
	lockdep_assert_held(&obj->base.dev->struct_mutex);
916

917
	*needs_clflush = 0;
918 919
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;
920

921 922 923 924 925
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
926 927 928
	if (ret)
		return ret;

C
Chris Wilson 已提交
929
	ret = i915_gem_object_pin_pages(obj);
930 931 932
	if (ret)
		return ret;

933 934
	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
935 936 937 938 939 940 941
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

942
	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
943

944 945 946 947 948
	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
949
	if (!obj->cache_dirty &&
950
	    !(obj->read_domains & I915_GEM_DOMAIN_CPU))
951
		*needs_clflush = CLFLUSH_BEFORE;
952

953
out:
954
	/* return with the pages pinned */
955
	return 0;
956 957 958 959

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
960 961 962 963 964 965 966
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

967 968
	lockdep_assert_held(&obj->base.dev->struct_mutex);

969 970 971 972
	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

973 974 975 976 977 978
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
979 980 981
	if (ret)
		return ret;

C
Chris Wilson 已提交
982
	ret = i915_gem_object_pin_pages(obj);
983 984 985
	if (ret)
		return ret;

986 987
	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
988 989 990 991 992 993 994
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

995
	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
996

997 998 999 1000 1001
	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
1002
	if (!obj->cache_dirty) {
1003
		*needs_clflush |= CLFLUSH_AFTER;
1004

1005 1006 1007 1008
		/*
		 * Same trick applies to invalidate partially written
		 * cachelines read before writing.
		 */
1009
		if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
1010 1011
			*needs_clflush |= CLFLUSH_BEFORE;
	}
1012

1013
out:
1014
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
C
Chris Wilson 已提交
1015
	obj->mm.dirty = true;
1016
	/* return with the pages pinned */
1017
	return 0;
1018 1019 1020 1021

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
1022 1023
}

1024 1025 1026 1027
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
1028
	if (unlikely(swizzled)) {
1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

1046 1047 1048
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
1049
shmem_pread_slow(struct page *page, int offset, int length,
1050 1051 1052 1053 1054 1055 1056 1057
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
1058
		shmem_clflush_swizzled_range(vaddr + offset, length,
1059
					     page_do_bit17_swizzling);
1060 1061

	if (page_do_bit17_swizzling)
1062
		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
1063
	else
1064
		ret = __copy_to_user(user_data, vaddr + offset, length);
1065 1066
	kunmap(page);

1067
	return ret ? - EFAULT : 0;
1068 1069
}

1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145
static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
	    bool page_do_bit17_swizzling, bool needs_clflush)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush)
			drm_clflush_virt_range(vaddr + offset, length);
		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return 0;

	return shmem_pread_slow(page, offset, length, user_data,
				page_do_bit17_swizzling, needs_clflush);
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	char __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	mutex_unlock(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pread(page, offset, length, user_data,
				  page_to_phys(page) & obj_do_bit17_swizzling,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
1146
{
1147
	void __iomem *vaddr;
1148
	unsigned long unwritten;
1149 1150

	/* We can use the cpu mem copy function because this is X86. */
1151 1152 1153 1154
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
1155 1156
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
1157 1158 1159 1160
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
1161 1162
		io_mapping_unmap(vaddr);
	}
1163 1164 1165 1166
	return unwritten;
}

static int
1167 1168
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
1169
{
1170 1171
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
1172
	struct drm_mm_node node;
1173 1174 1175
	struct i915_vma *vma;
	void __user *user_data;
	u64 remain, offset;
1176 1177
	int ret;

1178 1179 1180 1181 1182 1183
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1184 1185 1186
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
1187 1188 1189
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
1190
		ret = i915_vma_put_fence(vma);
1191 1192 1193 1194 1195
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
C
Chris Wilson 已提交
1196
	if (IS_ERR(vma)) {
1197
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1198
		if (ret)
1199 1200
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
1201 1202 1203 1204 1205 1206
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

1207
	mutex_unlock(&i915->drm.struct_mutex);
1208

1209 1210 1211
	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;
1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
1226 1227 1228
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
1229 1230 1231 1232
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}
1233

1234
		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
1235
				  user_data, page_length)) {
1236 1237 1238 1239 1240 1241 1242 1243 1244
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

1245
	mutex_lock(&i915->drm.struct_mutex);
1246 1247 1248
out_unpin:
	if (node.allocated) {
		wmb();
1249
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
1250 1251
		remove_mappable_node(&node);
	} else {
C
Chris Wilson 已提交
1252
		i915_vma_unpin(vma);
1253
	}
1254 1255 1256
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
1257

1258 1259 1260
	return ret;
}

1261 1262
/**
 * Reads data from the object referenced by handle.
1263 1264 1265
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
1266 1267 1268 1269 1270
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1271
		     struct drm_file *file)
1272 1273
{
	struct drm_i915_gem_pread *args = data;
1274
	struct drm_i915_gem_object *obj;
1275
	int ret;
1276

1277 1278 1279 1280
	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
1281
		       u64_to_user_ptr(args->data_ptr),
1282 1283 1284
		       args->size))
		return -EFAULT;

1285
	obj = i915_gem_object_lookup(file, args->handle);
1286 1287
	if (!obj)
		return -ENOENT;
1288

1289
	/* Bounds check source.  */
1290
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
C
Chris Wilson 已提交
1291
		ret = -EINVAL;
1292
		goto out;
C
Chris Wilson 已提交
1293 1294
	}

C
Chris Wilson 已提交
1295 1296
	trace_i915_gem_object_pread(obj, args->offset, args->size);

1297 1298 1299 1300
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1301
	if (ret)
1302
		goto out;
1303

1304
	ret = i915_gem_object_pin_pages(obj);
1305
	if (ret)
1306
		goto out;
1307

1308
	ret = i915_gem_shmem_pread(obj, args);
1309
	if (ret == -EFAULT || ret == -ENODEV)
1310
		ret = i915_gem_gtt_pread(obj, args);
1311

1312 1313
	i915_gem_object_unpin_pages(obj);
out:
C
Chris Wilson 已提交
1314
	i915_gem_object_put(obj);
1315
	return ret;
1316 1317
}

1318 1319
/* This is the fast write path which cannot handle
 * page faults in the source data
1320
 */
1321

1322 1323 1324 1325
static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
1326
{
1327
	void __iomem *vaddr;
1328
	unsigned long unwritten;
1329

1330
	/* We can use the cpu mem copy function because this is X86. */
1331 1332
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
1333
						      user_data, length);
1334 1335
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
1336 1337 1338
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
1339 1340
		io_mapping_unmap(vaddr);
	}
1341 1342 1343 1344

	return unwritten;
}

1345 1346 1347
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
1348
 * @obj: i915 GEM object
1349
 * @args: pwrite arguments structure
1350
 */
1351
static int
1352 1353
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
1354
{
1355
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
1356 1357
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
1358 1359 1360
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
1361
	int ret;
1362

1363 1364 1365
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;
D
Daniel Vetter 已提交
1366

1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383
	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		if (!intel_runtime_pm_get_if_in_use(i915)) {
			ret = -EFAULT;
			goto out_unlock;
		}
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		intel_runtime_pm_get(i915);
	}

C
Chris Wilson 已提交
1384
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1385 1386 1387
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
1388 1389 1390
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
1391
		ret = i915_vma_put_fence(vma);
1392 1393 1394 1395 1396
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
C
Chris Wilson 已提交
1397
	if (IS_ERR(vma)) {
1398
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1399
		if (ret)
1400
			goto out_rpm;
1401
		GEM_BUG_ON(!node.allocated);
1402
	}
D
Daniel Vetter 已提交
1403 1404 1405 1406 1407

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

1408 1409
	mutex_unlock(&i915->drm.struct_mutex);

1410
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1411

1412 1413 1414 1415
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
1416 1417
		/* Operation in this page
		 *
1418 1419 1420
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
1421
		 */
1422
		u32 page_base = node.start;
1423 1424
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
1425 1426 1427
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
1428 1429 1430
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
1431 1432 1433 1434
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
1435
		/* If we get a fault while copying data, then (presumably) our
1436 1437
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
1438 1439
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
1440
		 */
1441
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
1442 1443 1444
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
D
Daniel Vetter 已提交
1445
		}
1446

1447 1448 1449
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
1450
	}
1451
	intel_fb_obj_flush(obj, ORIGIN_CPU);
1452 1453

	mutex_lock(&i915->drm.struct_mutex);
D
Daniel Vetter 已提交
1454
out_unpin:
1455 1456
	if (node.allocated) {
		wmb();
1457
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
1458 1459
		remove_mappable_node(&node);
	} else {
C
Chris Wilson 已提交
1460
		i915_vma_unpin(vma);
1461
	}
1462
out_rpm:
1463
	intel_runtime_pm_put(i915);
1464
out_unlock:
1465
	mutex_unlock(&i915->drm.struct_mutex);
1466
	return ret;
1467 1468
}

1469
static int
1470
shmem_pwrite_slow(struct page *page, int offset, int length,
1471 1472 1473 1474
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
1475
{
1476 1477
	char *vaddr;
	int ret;
1478

1479
	vaddr = kmap(page);
1480
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1481
		shmem_clflush_swizzled_range(vaddr + offset, length,
1482
					     page_do_bit17_swizzling);
1483
	if (page_do_bit17_swizzling)
1484 1485
		ret = __copy_from_user_swizzled(vaddr, offset, user_data,
						length);
1486
	else
1487
		ret = __copy_from_user(vaddr + offset, user_data, length);
1488
	if (needs_clflush_after)
1489
		shmem_clflush_swizzled_range(vaddr + offset, length,
1490
					     page_do_bit17_swizzling);
1491
	kunmap(page);
1492

1493
	return ret ? -EFAULT : 0;
1494 1495
}

1496 1497 1498 1499 1500
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
1501
static int
1502 1503 1504 1505
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool page_do_bit17_swizzling,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
1506
{
1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush_before)
			drm_clflush_virt_range(vaddr + offset, len);
		ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
		if (needs_clflush_after)
			drm_clflush_virt_range(vaddr + offset, len);

		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return ret;

	return shmem_pwrite_slow(page, offset, len, user_data,
				 page_do_bit17_swizzling,
				 needs_clflush_before,
				 needs_clflush_after);
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	void __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int partial_cacheline_write;
1539
	unsigned int needs_clflush;
1540 1541
	unsigned int offset, idx;
	int ret;
1542

1543
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1544 1545 1546
	if (ret)
		return ret;

1547 1548 1549 1550
	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	mutex_unlock(&i915->drm.struct_mutex);
	if (ret)
		return ret;
1551

1552 1553 1554
	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);
1555

1556 1557 1558 1559 1560 1561 1562
	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1563

1564 1565 1566 1567 1568 1569
	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;
1570

1571 1572 1573
		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;
1574

1575 1576 1577 1578
		ret = shmem_pwrite(page, offset, length, user_data,
				   page_to_phys(page) & obj_do_bit17_swizzling,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
1579
		if (ret)
1580
			break;
1581

1582 1583 1584
		remain -= length;
		user_data += length;
		offset = 0;
1585
	}
1586

1587
	intel_fb_obj_flush(obj, ORIGIN_CPU);
1588
	i915_gem_obj_finish_shmem_access(obj);
1589
	return ret;
1590 1591 1592 1593
}

/**
 * Writes data to the object referenced by handle.
1594 1595 1596
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1597 1598 1599 1600 1601
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1602
		      struct drm_file *file)
1603 1604
{
	struct drm_i915_gem_pwrite *args = data;
1605
	struct drm_i915_gem_object *obj;
1606 1607 1608 1609 1610 1611
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
1612
		       u64_to_user_ptr(args->data_ptr),
1613 1614 1615
		       args->size))
		return -EFAULT;

1616
	obj = i915_gem_object_lookup(file, args->handle);
1617 1618
	if (!obj)
		return -ENOENT;
1619

1620
	/* Bounds check destination. */
1621
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
C
Chris Wilson 已提交
1622
		ret = -EINVAL;
1623
		goto err;
C
Chris Wilson 已提交
1624 1625
	}

C
Chris Wilson 已提交
1626 1627
	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

1628 1629 1630 1631 1632 1633
	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

1634 1635 1636 1637 1638
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1639 1640 1641
	if (ret)
		goto err;

1642
	ret = i915_gem_object_pin_pages(obj);
1643
	if (ret)
1644
		goto err;
1645

D
Daniel Vetter 已提交
1646
	ret = -EFAULT;
1647 1648 1649 1650 1651 1652
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
1653
	if (!i915_gem_object_has_struct_page(obj) ||
1654
	    cpu_write_needs_clflush(obj))
D
Daniel Vetter 已提交
1655 1656
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
1657 1658
		 * textures). Fallback to the shmem path in that case.
		 */
1659
		ret = i915_gem_gtt_pwrite_fast(obj, args);
1660

1661
	if (ret == -EFAULT || ret == -ENOSPC) {
1662 1663
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
1664
		else
1665
			ret = i915_gem_shmem_pwrite(obj, args);
1666
	}
1667

1668
	i915_gem_object_unpin_pages(obj);
1669
err:
C
Chris Wilson 已提交
1670
	i915_gem_object_put(obj);
1671
	return ret;
1672 1673
}

1674 1675 1676 1677 1678 1679
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915;
	struct list_head *list;
	struct i915_vma *vma;

1680 1681
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

1682
	for_each_ggtt_vma(vma, obj) {
1683 1684 1685 1686 1687 1688 1689 1690 1691 1692
		if (i915_vma_is_active(vma))
			continue;

		if (!drm_mm_node_allocated(&vma->node))
			continue;

		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}

	i915 = to_i915(obj->base.dev);
1693
	spin_lock(&i915->mm.obj_lock);
1694
	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1695 1696
	list_move_tail(&obj->mm.link, list);
	spin_unlock(&i915->mm.obj_lock);
1697 1698
}

1699
/**
1700 1701
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
1702 1703 1704
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1705 1706 1707
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1708
			  struct drm_file *file)
1709 1710
{
	struct drm_i915_gem_set_domain *args = data;
1711
	struct drm_i915_gem_object *obj;
1712 1713
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
1714
	int err;
1715

1716
	/* Only handle setting domains to types used by the CPU. */
1717
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1718 1719 1720 1721 1722 1723 1724 1725
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

1726
	obj = i915_gem_object_lookup(file, args->handle);
1727 1728
	if (!obj)
		return -ENOENT;
1729

1730 1731 1732 1733
	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
1734
	err = i915_gem_object_wait(obj,
1735 1736 1737 1738
				   I915_WAIT_INTERRUPTIBLE |
				   (write_domain ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1739
	if (err)
C
Chris Wilson 已提交
1740
		goto out;
1741

T
Tina Zhang 已提交
1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754
	/*
	 * Proxy objects do not control access to the backing storage, ergo
	 * they cannot be used as a means to manipulate the cache domain
	 * tracking for that backing storage. The proxy object is always
	 * considered to be outside of any cache domain.
	 */
	if (i915_gem_object_is_proxy(obj)) {
		err = -ENXIO;
		goto out;
	}

	/*
	 * Flush and acquire obj->pages so that we are coherent through
1755 1756 1757 1758 1759 1760 1761 1762 1763
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	err = i915_gem_object_pin_pages(obj);
	if (err)
C
Chris Wilson 已提交
1764
		goto out;
1765 1766 1767

	err = i915_mutex_lock_interruptible(dev);
	if (err)
C
Chris Wilson 已提交
1768
		goto out_unpin;
1769

1770 1771 1772 1773
	if (read_domains & I915_GEM_DOMAIN_WC)
		err = i915_gem_object_set_to_wc_domain(obj, write_domain);
	else if (read_domains & I915_GEM_DOMAIN_GTT)
		err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
1774
	else
1775
		err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
1776

1777 1778
	/* And bump the LRU for this access */
	i915_gem_object_bump_inactive_ggtt(obj);
1779

1780
	mutex_unlock(&dev->struct_mutex);
1781

1782
	if (write_domain != 0)
1783 1784
		intel_fb_obj_invalidate(obj,
					fb_write_origin(obj, write_domain));
1785

C
Chris Wilson 已提交
1786
out_unpin:
1787
	i915_gem_object_unpin_pages(obj);
C
Chris Wilson 已提交
1788 1789
out:
	i915_gem_object_put(obj);
1790
	return err;
1791 1792 1793 1794
}

/**
 * Called when user space has done writes to this buffer
1795 1796 1797
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1798 1799 1800
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1801
			 struct drm_file *file)
1802 1803
{
	struct drm_i915_gem_sw_finish *args = data;
1804
	struct drm_i915_gem_object *obj;
1805

1806
	obj = i915_gem_object_lookup(file, args->handle);
1807 1808
	if (!obj)
		return -ENOENT;
1809

T
Tina Zhang 已提交
1810 1811 1812 1813 1814
	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

1815
	/* Pinned buffers may be scanout, so flush the cache */
1816
	i915_gem_object_flush_if_display(obj);
C
Chris Wilson 已提交
1817
	i915_gem_object_put(obj);
1818 1819

	return 0;
1820 1821 1822
}

/**
1823 1824 1825 1826 1827
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1828 1829 1830
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
1831 1832 1833 1834 1835 1836 1837 1838 1839 1840
 *
 * IMPORTANT:
 *
 * DRM driver writers who look a this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
1841 1842 1843
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1844
		    struct drm_file *file)
1845 1846
{
	struct drm_i915_gem_mmap *args = data;
1847
	struct drm_i915_gem_object *obj;
1848 1849
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		i915_gem_object_put(obj);
		return -ENXIO;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem)) {
			i915_gem_object_put(obj);
			return -EINTR;
		}
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
	}
	i915_gem_object_put(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}
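/*
 * Example (userspace sketch, fd/handle/size assumed): requesting a WC CPU
 * mapping of the object's shmemfs backing store via the ioctl above and
 * writing through it. addr_ptr comes back as a plain CPU pointer.
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.size = size,
 *		.flags = I915_MMAP_WC,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg) == 0)
 *		memcpy((void *)(uintptr_t)mmap_arg.addr_ptr, data, size);
 */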

static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be
 *     aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 2;
}
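/*
 * Example (userspace sketch, fd assumed open): querying the feature level
 * above before relying on partial views or the WC domain. use_partial_views
 * is a hypothetical flag in the caller.
 *
 *	int gtt_version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &gtt_version,
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && gtt_version >= 1)
 *		use_partial_views = true;
 */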

static inline struct i915_ggtt_view
compute_partial_view(struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj));

	view.type = I915_GGTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}
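/*
 * Worked example of the computation above (values are illustrative): an
 * untiled 8MiB object spans 2048 pages; a fault at page 1000 with the
 * default chunk of MIN_CHUNK_PAGES (256 pages) gives
 * partial.offset = rounddown(1000, 256) = 768 and
 * partial.size = min(256, 2048 - 768) = 256, i.e. a 1MiB window around the
 * faulting page. Only when the chunk covers the whole object do we fall back
 * to a normal (full) view.
 */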

/**
 * i915_gem_fault - fault a page into the GTT
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 *
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
 */
vm_fault_t i915_gem_fault(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
	struct vm_area_struct *area = vmf->vma;
	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned int flags;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon acquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
		ret = -EFAULT;
		goto err_unlock;
	}

	/* If the object is smaller than a couple of partial vma, it is
	 * not worth only creating a single partial vma - we may as well
	 * clear enough space for the full object.
	 */
	flags = PIN_MAPPABLE;
	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
		flags |= PIN_NONBLOCK | PIN_NONFAULT;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
	if (IS_ERR(vma)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);

		/* Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */
		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unlock;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_unpin;

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->iomap);
	if (ret)
		goto err_fence;

	/* Mark as being mmapped into userspace for later revocation */
	assert_rpm_wakelock_held(dev_priv);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
	GEM_BUG_ON(!obj->userfault_count);

	i915_vma_set_ggtt_write(vma);

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(dev_priv);
	i915_gem_object_unpin_pages(obj);
err:
	switch (ret) {
	case -EIO:
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error))
			return VM_FAULT_SIGBUS;
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	case -ENOSPC:
	case -EFAULT:
		return VM_FAULT_SIGBUS;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		return VM_FAULT_SIGBUS;
	}
}

static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	obj->userfault_count = 0;
	list_del(&obj->userfault_link);
	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

2154
	for_each_ggtt_vma(vma, obj)
2155 2156 2157
		i915_vma_unset_userfault(vma);
}

2158 2159 2160 2161
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
2172
void
2173
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
2174
{
2175 2176
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

2177 2178 2179
	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
2180 2181 2182 2183
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
2184
	 */
2185
	lockdep_assert_held(&i915->drm.struct_mutex);
2186
	intel_runtime_pm_get(i915);
2187

2188
	if (!obj->userfault_count)
2189
		goto out;
2190

2191
	__i915_gem_object_release_mmap(obj);

	/* Ensure that the CPU's PTEs are revoked and there are no outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();
2201 2202 2203

out:
	intel_runtime_pm_put(i915);
2204 2205
}

2206
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
2207
{
2208
	struct drm_i915_gem_object *obj, *on;
2209
	int i;
2210

2211 2212 2213 2214 2215 2216
	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */
2217

2218
	list_for_each_entry_safe(obj, on,
2219 2220
				 &dev_priv->mm.userfault_list, userfault_link)
		__i915_gem_object_release_mmap(obj);
2221 2222 2223 2224 2225 2226 2227 2228

	/* The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

2229 2230 2231 2232 2233 2234 2235 2236 2237 2238
		/* Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */
2239 2240 2241 2242

		if (!reg->vma)
			continue;

2243
		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
2244 2245
		reg->dirty = true;
	}
2246 2247
}

2248 2249
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
2250
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2251
	int err;
2252

2253
	err = drm_gem_create_mmap_offset(&obj->base);
2254
	if (likely(!err))
2255
		return 0;
2256

2257 2258 2259 2260 2261
	/* Attempt to reap some mmap space from dead objects */
	do {
		err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
		if (err)
			break;
2262

2263
		i915_gem_drain_freed_objects(dev_priv);
2264
		err = drm_gem_create_mmap_offset(&obj->base);
2265 2266 2267 2268
		if (!err)
			break;

	} while (flush_delayed_work(&dev_priv->gt.retire_work));
2269

2270
	return err;
2271 2272 2273 2274 2275 2276 2277
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

2278
int
2279 2280
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
2281
		  uint32_t handle,
2282
		  uint64_t *offset)
2283
{
2284
	struct drm_i915_gem_object *obj;
2285 2286
	int ret;

2287
	obj = i915_gem_object_lookup(file, handle);
2288 2289
	if (!obj)
		return -ENOENT;
2290

2291
	ret = i915_gem_object_create_mmap_offset(obj);
2292 2293
	if (ret == 0)
		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2294

	i915_gem_object_put(obj);
2296
	return ret;
2297 2298
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
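/*
 * Example (userspace sketch, fd/handle/size assumed): the two-step GTT mmap
 * flow described above. The fake offset returned by the ioctl is only
 * meaningful when passed back to mmap() on the same drm fd.
 *
 *	struct drm_i915_gem_mmap_gtt map = { .handle = handle };
 *	void *ptr = MAP_FAILED;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &map) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			   fd, map.offset);
 */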

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2326
{
2327
	i915_gem_object_free_mmap_offset(obj);
2328

2329 2330
	if (obj->base.filp == NULL)
		return;
2331

	/* Our goal here is to return as much of the memory as possible
	 * back to the system, as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
2339
	obj->mm.pages = ERR_PTR(-EFAULT);
}
2341

2342
/* Try to discard unwanted pages */
2343
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
2345 2346
	struct address_space *mapping;

2347
	lockdep_assert_held(&obj->mm.lock);
2348
	GEM_BUG_ON(i915_gem_object_has_pages(obj));
2349

	switch (obj->mm.madv) {
2351 2352 2353 2354 2355 2356 2357 2358 2359
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = obj->base.filp->f_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2362 2363
}

2364
static void
2365 2366
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
			      struct sg_table *pages)
2367
{
2368 2369
	struct sgt_iter sgt_iter;
	struct page *page;
2370

2371
	__i915_gem_object_release_shmem(obj, pages, true);
2372

2373
	i915_gem_gtt_finish_pages(obj, pages);

2375
	if (i915_gem_object_needs_bit17_swizzle(obj))
2376
		i915_gem_object_save_bit_17_swizzle(obj, pages);
2377

2378
	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
2380
			set_page_dirty(page);
2381

		if (obj->mm.madv == I915_MADV_WILLNEED)
2383
			mark_page_accessed(page);
2384

2385
		put_page(page);
2386
	}
	obj->mm.dirty = false;
2388

2389 2390
	sg_free_table(pages);
	kfree(pages);
2391
}

2393 2394 2395
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
2396
	void __rcu **slot;
2397

2398
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2401
	rcu_read_unlock();
2402 2403
}

2404 2405
static struct sg_table *
__i915_gem_object_unset_pages(struct drm_i915_gem_object *obj)
2406
{
2407
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
2408
	struct sg_table *pages;
2409

2410
	pages = fetch_and_zero(&obj->mm.pages);
2411 2412
	if (!pages)
		return NULL;
2413

2414 2415 2416 2417
	spin_lock(&i915->mm.obj_lock);
	list_del(&obj->mm.link);
	spin_unlock(&i915->mm.obj_lock);

	if (obj->mm.mapping) {
2419 2420
		void *ptr;

2421
		ptr = page_mask_bits(obj->mm.mapping);
2422 2423
		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
2424
		else
2425 2426
			kunmap(kmap_to_page(ptr));

		obj->mm.mapping = NULL;
2428 2429
	}

2430
	__i915_gem_object_reset_page_iter(obj);
2431 2432 2433 2434
	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

	return pages;
}
2435

2436 2437 2438 2439 2440 2441 2442 2443 2444 2445 2446 2447 2448 2449 2450 2451 2452 2453 2454 2455 2456 2457 2458
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				 enum i915_mm_subclass subclass)
{
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return;

	GEM_BUG_ON(obj->bind_count);
	if (!i915_gem_object_has_pages(obj))
		return;

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock_nested(&obj->mm.lock, subclass);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
		goto unlock;

	/*
	 * ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early.
	 */
	pages = __i915_gem_object_unset_pages(obj);
2459 2460 2461
	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

2462 2463
unlock:
	mutex_unlock(&obj->mm.lock);
}

2466
static bool i915_sg_trim(struct sg_table *orig_st)
2467 2468 2469 2470 2471 2472
{
	struct sg_table new_st;
	struct scatterlist *sg, *new_sg;
	unsigned int i;

	if (orig_st->nents == orig_st->orig_nents)
2473
		return false;
2474

2475
	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
2476
		return false;
2477 2478 2479 2480 2481 2482 2483

	new_sg = new_st.sgl;
	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
		/* called before being DMA mapped, no need to copy sg->dma_* */
		new_sg = sg_next(new_sg);
	}
2484
	GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
2485 2486 2487 2488

	sg_free_table(orig_st);

	*orig_st = new_st;
2489
	return true;
2490 2491
}

2492
static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2493
{
2494
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2495 2496
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned long i;
2497
	struct address_space *mapping;
2498 2499
	struct sg_table *st;
	struct scatterlist *sg;
2500
	struct sgt_iter sgt_iter;
2501
	struct page *page;
2502
	unsigned long last_pfn = 0;	/* suppress gcc warning */
2503
	unsigned int max_segment = i915_sg_segment_size();
	unsigned int sg_page_sizes;
2505
	gfp_t noreclaim;
	int ret;
2507

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
2512 2513
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

2515 2516
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
2517
		return -ENOMEM;
2518

2519
rebuild_st:
2520 2521
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
2522
		return -ENOMEM;
2523
	}
2524

2525 2526 2527 2528 2529
	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
2530
	mapping = obj->base.filp->f_mapping;
2531
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
2532 2533
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

2534 2535
	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
2537
	for (i = 0; i < page_count; i++) {
2538 2539 2540 2541 2542 2543 2544
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2546 2547 2548 2549 2550 2551 2552 2553
			if (likely(!IS_ERR(page)))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

2554
			i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
2555
			cond_resched();
2556

			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
2560 2561 2562 2563
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
2565 2566 2567
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);
2568 2569 2570 2571 2572 2573 2574 2575 2576 2577 2578 2579

				/* Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
2581
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
2584 2585
		} while (1);

2586 2587 2588
		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
2589
			if (i) {
				sg_page_sizes |= sg->length;
2591
				sg = sg_next(sg);
2592
			}
2593 2594 2595 2596 2597 2598
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);
2599 2600 2601

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2602
	}
2603
	if (sg) { /* loop terminated early; short sg table */
		sg_page_sizes |= sg->length;
2605
		sg_mark_end(sg);
2606
	}
2607

2608 2609 2610
	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

2611
	ret = i915_gem_gtt_prepare_pages(obj, st);
2612 2613 2614 2615 2616 2617 2618 2619 2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630
	if (ret) {
		/* DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(&dev_priv->drm.pdev->dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

2632
	if (i915_gem_object_needs_bit17_swizzle(obj))
2633
		i915_gem_object_do_bit_17_swizzle(obj, st);
2634

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);
2636 2637

	return 0;
2638

2639
err_sg:
2640
	sg_mark_end(sg);
2641
err_pages:
2642 2643
	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);
2644 2645
	sg_free_table(st);
	kfree(st);
2646 2647 2648 2649 2650 2651 2652 2653 2654

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

2658
	return ret;
2659 2660 2661
}

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;
2675 2676

	if (i915_gem_object_is_tiled(obj) &&
2677
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2678 2679 2680 2681
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	spin_lock(&i915->mm.obj_lock);
	list_add(&obj->mm.link, &i915->mm.unbound_list);
	spin_unlock(&i915->mm.obj_lock);
}
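/*
 * Worked example of the page_sizes.sg derivation above: suppose the platform
 * supports 4K, 64K and 2M pages and the trimmed sg table holds both 2M and
 * 4K chunks, so page_sizes.phys = SZ_2M | SZ_4K. For each supported size i
 * we test whether any chunk of at least that size exists
 * (phys & ~0u << i), so page_sizes.sg becomes SZ_4K | SZ_64K | SZ_2M and the
 * GTT insertion path may later pick any of those page sizes
 * opportunistically.
 */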

static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
2708
	int err;
2709 2710 2711 2712 2713 2714

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

2715
	err = obj->ops->get_pages(obj);
2716
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
2717

2718
	return err;
2719 2720
}

2721
/* Ensure that the associated pages are gathered from the backing storage
2722
 * and pinned into our object. i915_gem_object_pin_pages() may be called
2723
 * multiple times before they are released by a single call to
2724
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
2725 2726 2727
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2729
{
2730
	int err;
2731

2732 2733 2734
	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return err;
2735

2736
	if (unlikely(!i915_gem_object_has_pages(obj))) {
2737 2738
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

2739 2740 2741
		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;
2742

2743 2744 2745
		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);
2746

2747 2748
unlock:
	mutex_unlock(&obj->mm.lock);
2749
	return err;
2750 2751
}

2752
/* The 'mapping' part of i915_gem_object_pin_map() below */
2753 2754
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
2755 2756
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->mm.pages;
2758 2759
	struct sgt_iter sgt_iter;
	struct page *page;
2760 2761
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
2762
	unsigned long i = 0;
2763
	pgprot_t pgprot;
2764 2765 2766
	void *addr;

	/* A single page can always be kmapped */
2767
	if (n_pages == 1 && type == I915_MAP_WB)
2768 2769
		return kmap(sg_page(sgt->sgl));

2770 2771
	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
2772
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
2773 2774 2775
		if (!pages)
			return NULL;
	}
2776

2777 2778
	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;
2779 2780 2781 2782

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

2783
	switch (type) {
2784 2785 2786
	default:
		MISSING_CASE(type);
		/* fallthrough to use PAGE_KERNEL anyway */
2787 2788 2789 2790 2791 2792 2793 2794
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);
2795

2796
	if (pages != stack_pages)
		kvfree(pages);
2798 2799 2800 2801 2802

	return addr;
}

/* get, pin, and map the pages of the object into kernel space */
2803 2804
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
2805
{
2806 2807 2808
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
2809 2810
	int ret;

	if (unlikely(!i915_gem_object_has_struct_page(obj)))
		return ERR_PTR(-ENXIO);
2813

2814
	ret = mutex_lock_interruptible(&obj->mm.lock);
2815 2816 2817
	if (ret)
		return ERR_PTR(ret);

2818 2819 2820
	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

2821
	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2822
		if (unlikely(!i915_gem_object_has_pages(obj))) {
2823 2824
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

2825 2826 2827
			ret = ____i915_gem_object_get_pages(obj);
			if (ret)
				goto err_unlock;
2828

2829 2830 2831
			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
2832 2833
		pinned = false;
	}
2834
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
2835

2836
	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
2837 2838 2839
	if (ptr && has_type != type) {
		if (pinned) {
			ret = -EBUSY;
2840
			goto err_unpin;
2841
		}
2842 2843 2844 2845 2846 2847

		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		ptr = obj->mm.mapping = NULL;
2849 2850
	}

2851 2852 2853 2854
	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			ret = -ENOMEM;
2855
			goto err_unpin;
2856 2857
		}

2858
		obj->mm.mapping = page_pack_bits(ptr, type);
2859 2860
	}

2861 2862
out_unlock:
	mutex_unlock(&obj->mm.lock);
2863 2864
	return ptr;

2865 2866 2867 2868 2869
err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(ret);
	goto out_unlock;
2870 2871
}
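/*
 * Example (in-kernel sketch, error handling trimmed): the usual pairing of
 * this helper with i915_gem_object_unpin_map(); the mapping stays valid for
 * exactly as long as the pin is held.
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *	memset(vaddr, 0, obj->base.size);
 *	i915_gem_object_unpin_map(obj);
 */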

2872 2873 2874 2875 2876 2877 2878 2879 2880 2881 2882 2883 2884 2885 2886 2887 2888
static int
i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
			   const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
2889
	if (i915_gem_object_has_pages(obj))
2890 2891
		return -ENODEV;

2892 2893 2894
	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

2895 2896 2897 2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915 2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930 2931 2932 2933 2934 2935 2936 2937 2938 2939 2940 2941 2942 2943
	/* Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation; corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap(page);
		unwritten = copy_from_user(vaddr + pg, user_data, len);
		kunmap(page);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		if (unwritten)
			return -EFAULT;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}
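/*
 * Example (userspace sketch, fd/handle assumed): the pwrite ioctl that
 * benefits from the pagecache prepopulation above, issued against a freshly
 * created object that has never been mapped or used by the GPU.
 *
 *	struct drm_i915_gem_pwrite pw = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(payload),
 *		.data_ptr = (uint64_t)(uintptr_t)payload,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pw);
 */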

2944 2945 2946 2947 2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958 2959 2960 2961 2962 2963 2964 2965 2966 2967
static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
					const struct i915_gem_context *ctx)
{
	unsigned int score;
	unsigned long prev_hang;

	if (i915_gem_context_is_banned(ctx))
		score = I915_CLIENT_SCORE_CONTEXT_BAN;
	else
		score = 0;

	prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
	if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
		score += I915_CLIENT_SCORE_HANG_FAST;

	if (score) {
		atomic_add(score, &file_priv->ban_score);

		DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
				 ctx->name, score,
				 atomic_read(&file_priv->ban_score));
	}
}

2968
static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
2969
{
2970 2971
	unsigned int score;
	bool banned, bannable;
2972

2973
	atomic_inc(&ctx->guilty_count);
2974

2975 2976 2977
	bannable = i915_gem_context_is_bannable(ctx);
	score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
	banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
2978

2979 2980 2981
	DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
			 ctx->name, atomic_read(&ctx->guilty_count),
			 score, yesno(banned && bannable));
2982

2983 2984
	/* Cool contexts don't accumulate client ban score */
	if (!bannable)
2985 2986
		return;

2987 2988 2989 2990 2991
	if (banned)
		i915_gem_context_set_banned(ctx);

	if (!IS_ERR_OR_NULL(ctx->file_priv))
		i915_gem_client_mark_guilty(ctx->file_priv, ctx);
2992 2993 2994 2995
}

static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
{
2996
	atomic_inc(&ctx->active_count);
2997 2998
}

2999
struct i915_request *
3000
i915_gem_find_active_request(struct intel_engine_cs *engine)
3001
{
3002
	struct i915_request *request, *active = NULL;
3003
	unsigned long flags;
3004

3005 3006 3007 3008 3009 3010
	/*
	 * We are called by the error capture, reset and to dump engine
	 * state at random points in time. In particular, note that neither is
	 * crucially ordered with an interrupt. After a hang, the GPU is dead
	 * and we assume that no more writes can happen (we waited long enough
	 * for all writes that were in transaction to be flushed) - adding an
3011 3012
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
3013 3014
	 * At all other times, we must assume the GPU is still running, but
	 * we only care about the snapshot of this moment.
3015
	 */
3016 3017
	spin_lock_irqsave(&engine->timeline.lock, flags);
	list_for_each_entry(request, &engine->timeline.requests, link) {
3018
		if (__i915_request_completed(request, request->global_seqno))
3019
			continue;
3020

3021 3022
		active = request;
		break;
3023
	}
3024
	spin_unlock_irqrestore(&engine->timeline.lock, flags);
3025

3026
	return active;
3027 3028
}

3029 3030 3031 3032
/*
 * Ensure irq handler finishes, and not run again.
 * Also return the active request so that we only search for it once.
 */
3033
struct i915_request *
3034 3035
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
3036
	struct i915_request *request;
3037

3038 3039 3040 3041 3042 3043 3044 3045 3046
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);

3047
	request = engine->reset.prepare(engine);
3048 3049
	if (request && request->fence.error == -EIO)
		request = ERR_PTR(-EIO); /* Previous reset failed! */
3050 3051 3052 3053

	return request;
}

3054
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
3055 3056
{
	struct intel_engine_cs *engine;
3057
	struct i915_request *request;
3058
	enum intel_engine_id id;
3059
	int err = 0;
3060

3061
	for_each_engine(engine, dev_priv, id) {
3062 3063 3064 3065
		request = i915_gem_reset_prepare_engine(engine);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			continue;
3066
		}
3067 3068

		engine->hangcheck.active_request = request;
3069 3070
	}

3071
	i915_gem_revoke_fences(dev_priv);
3072
	intel_uc_sanitize(dev_priv);
3073 3074

	return err;
3075 3076
}

3077
static void skip_request(struct i915_request *request)
3078 3079 3080 3081 3082 3083 3084 3085 3086 3087 3088 3089 3090 3091
{
	void *vaddr = request->ring->vaddr;
	u32 head;

	/* As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = request->head;
	if (request->postfix < head) {
		memset(vaddr + head, 0, request->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, request->postfix - head);
3092 3093

	dma_fence_set_error(&request->fence, -EIO);
3094 3095
}

3096
static void engine_skip_context(struct i915_request *request)
3097 3098
{
	struct intel_engine_cs *engine = request->engine;
	struct i915_gem_context *hung_ctx = request->gem_context;
3100
	struct i915_timeline *timeline = request->timeline;
3101 3102
	unsigned long flags;

3103
	GEM_BUG_ON(timeline == &engine->timeline);
3104

3105
	spin_lock_irqsave(&engine->timeline.lock, flags);
3106
	spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);
3107

3108
	list_for_each_entry_continue(request, &engine->timeline.requests, link)
		if (request->gem_context == hung_ctx)
3110 3111 3112 3113 3114 3115
			skip_request(request);

	list_for_each_entry(request, &timeline->requests, link)
		skip_request(request);

	spin_unlock(&timeline->lock);
3116
	spin_unlock_irqrestore(&engine->timeline.lock, flags);
3117 3118
}

3119
/* Returns the request if it was guilty of the hang */
3120
static struct i915_request *
3121
i915_gem_reset_request(struct intel_engine_cs *engine,
3122 3123
		       struct i915_request *request,
		       bool stalled)
3124
{
3125 3126 3127 3128 3129 3130 3131 3132 3133 3134 3135 3136 3137 3138 3139 3140 3141 3142 3143 3144 3145
	/* The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away random
	 * amount of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when it loses the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanism are safety
	 * valves if client submission ends up resulting in nothing more than
	 * subsequent hangs.
	 */

3146 3147 3148 3149 3150 3151 3152 3153 3154
	if (i915_request_completed(request)) {
		GEM_TRACE("%s pardoned global=%d (fence %llx:%d), current %d\n",
			  engine->name, request->global_seqno,
			  request->fence.context, request->fence.seqno,
			  intel_engine_get_seqno(engine));
		stalled = false;
	}

	if (stalled) {
		i915_gem_context_mark_guilty(request->gem_context);
3156
		skip_request(request);
3157 3158

		/* If this context is now banned, skip all pending requests. */
		if (i915_gem_context_is_banned(request->gem_context))
3160
			engine_skip_context(request);
3161
	} else {
3162 3163 3164 3165 3166 3167 3168
		/*
		 * Since this is not the hung engine, it may have advanced
		 * since the hang declaration. Double check by refinding
		 * the active request at the time of the reset.
		 */
		request = i915_gem_find_active_request(engine);
		if (request) {
			i915_gem_context_mark_innocent(request->gem_context);
3170 3171 3172
			dma_fence_set_error(&request->fence, -EAGAIN);

			/* Rewind the engine to replay the incomplete rq */
3173
			spin_lock_irq(&engine->timeline.lock);
3174
			request = list_prev_entry(request, link);
3175
			if (&request->link == &engine->timeline.requests)
3176
				request = NULL;
3177
			spin_unlock_irq(&engine->timeline.lock);
3178
		}
3179 3180
	}

3181
	return request;
3182 3183
}

3184
void i915_gem_reset_engine(struct intel_engine_cs *engine,
3185 3186
			   struct i915_request *request,
			   bool stalled)
3187
{
3188 3189 3190 3191 3192 3193
	/*
	 * Make sure this write is visible before we re-enable the interrupt
	 * handlers on another CPU, as tasklet_enable() resolves to just
	 * a compiler barrier which is insufficient for our purpose here.
	 */
	smp_store_mb(engine->irq_posted, 0);
3194

3195
	if (request)
3196
		request = i915_gem_reset_request(engine, request, stalled);
3197

3198
	/* Setup the CS to resume from the breadcrumb of the hung request */
3199
	engine->reset.reset(engine, request);
3200
}
3201

3202 3203
void i915_gem_reset(struct drm_i915_private *dev_priv,
		    unsigned int stalled_mask)
3204
{
3205
	struct intel_engine_cs *engine;
3206
	enum intel_engine_id id;
3207

3208 3209
	lockdep_assert_held(&dev_priv->drm.struct_mutex);

3210
	i915_retire_requests(dev_priv);
3211

3212
	for_each_engine(engine, dev_priv, id) {
3213
		struct intel_context *ce;
3214

3215 3216
		i915_gem_reset_engine(engine,
				      engine->hangcheck.active_request,
3217
				      stalled_mask & ENGINE_MASK(id));
3218 3219 3220
		ce = fetch_and_zero(&engine->last_retired_context);
		if (ce)
			intel_context_unpin(ce);
3221 3222 3223 3224 3225 3226 3227 3228 3229 3230 3231

		/*
		 * Ostensibily, we always want a context loaded for powersaving,
		 * so if the engine is idle after the reset, send a request
		 * to load our scratch kernel_context.
		 *
		 * More mysteriously, if we leave the engine idle after a reset,
		 * the next userspace batch may hang, with what appears to be
		 * an incoherent read by the CS (presumably stale TLB). An
		 * empty request appears sufficient to paper over the glitch.
		 */
3232
		if (intel_engine_is_idle(engine)) {
3233
			struct i915_request *rq;
3234

3235 3236
			rq = i915_request_alloc(engine,
						dev_priv->kernel_context);
3237
			if (!IS_ERR(rq))
3238
				i915_request_add(rq);
3239
		}
3240
	}
3241

3242
	i915_gem_restore_fences(dev_priv);
3243 3244
}

3245 3246
void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
3247 3248
	engine->reset.finish(engine);

3249
	intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
3250 3251
}

3252 3253
void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
{
3254 3255 3256
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

3257
	lockdep_assert_held(&dev_priv->drm.struct_mutex);
3258

3259
	for_each_engine(engine, dev_priv, id) {
3260
		engine->hangcheck.active_request = NULL;
3261
		i915_gem_reset_finish_engine(engine);
3262
	}
3263 3264
}

3265
static void nop_submit_request(struct i915_request *request)
3266
{
3267 3268 3269
	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
		  request->engine->name,
		  request->fence.context, request->fence.seqno);
3270 3271
	dma_fence_set_error(&request->fence, -EIO);

3272
	i915_request_submit(request);
3273 3274
}

3275
static void nop_complete_submit_request(struct i915_request *request)
3276
{
3277 3278
	unsigned long flags;

3279 3280 3281
	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
		  request->engine->name,
		  request->fence.context, request->fence.seqno);
3282
	dma_fence_set_error(&request->fence, -EIO);
3283

3284
	spin_lock_irqsave(&request->engine->timeline.lock, flags);
3285
	__i915_request_submit(request);
3286
	intel_engine_init_global_seqno(request->engine, request->global_seqno);
3287
	spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
3288 3289
}

3290
void i915_gem_set_wedged(struct drm_i915_private *i915)
3291
{
3292 3293 3294
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

3295 3296
	GEM_TRACE("start\n");

3297
	if (GEM_SHOW_DEBUG()) {
3298 3299 3300 3301 3302 3303
		struct drm_printer p = drm_debug_printer(__func__);

		for_each_engine(engine, i915, id)
			intel_engine_dump(engine, &p, "%s\n", engine->name);
	}

3304 3305 3306
	set_bit(I915_WEDGED, &i915->gpu_error.flags);
	smp_mb__after_atomic();

3307 3308 3309 3310 3311
	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
3312 3313
	for_each_engine(engine, i915, id) {
		i915_gem_reset_prepare_engine(engine);
3314

3315
		engine->submit_request = nop_submit_request;
3316
		engine->schedule = NULL;
3317
	}
3318
	i915->caps.scheduler = 0;
3319

3320 3321 3322
	/* Even if the GPU reset fails, it should still stop the engines */
	intel_gpu_reset(i915, ALL_ENGINES);

3323 3324 3325 3326
	/*
	 * Make sure no one is running the old callback before we proceed with
	 * cancelling requests and resetting the completion tracking. Otherwise
	 * we might submit a request to the hardware which never completes.
3327
	 */
3328
	synchronize_rcu();
3329

3330 3331 3332
	for_each_engine(engine, i915, id) {
		/* Mark all executing requests as skipped */
		engine->cancel_requests(engine);
3333

3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344
		/*
		 * Only once we've force-cancelled all in-flight requests can we
		 * start to complete all requests.
		 */
		engine->submit_request = nop_complete_submit_request;
	}

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_init_global_seqno, or the one
	 * in nop_complete_submit_request.
3345
	 */
3346
	synchronize_rcu();
3347

3348 3349
	for_each_engine(engine, i915, id) {
		unsigned long flags;
3350

3351 3352
		/*
		 * Mark all pending requests as complete so that any concurrent
3353 3354 3355
		 * (lockless) lookup doesn't try and wait upon the request as we
		 * reset it.
		 */
3356
		spin_lock_irqsave(&engine->timeline.lock, flags);
3357 3358
		intel_engine_init_global_seqno(engine,
					       intel_engine_last_submit(engine));
3359
		spin_unlock_irqrestore(&engine->timeline.lock, flags);
3360 3361

		i915_gem_reset_finish_engine(engine);
3362
	}
3363

3364 3365
	GEM_TRACE("end\n");

3366
	wake_up_all(&i915->gpu_error.reset_queue);
3367 3368
}

3369 3370
bool i915_gem_unset_wedged(struct drm_i915_private *i915)
{
3371
	struct i915_timeline *tl;
3372 3373 3374 3375 3376

	lockdep_assert_held(&i915->drm.struct_mutex);
	if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
		return true;

3377 3378
	GEM_TRACE("start\n");

3379 3380
	/*
	 * Before unwedging, make sure that all pending operations
3381 3382 3383 3384 3385 3386 3387 3388 3389
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO, for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	list_for_each_entry(tl, &i915->gt.timelines, link) {
3390
		struct i915_request *rq;
3391

3392 3393 3394 3395
		rq = i915_gem_active_peek(&tl->last_request,
					  &i915->drm.struct_mutex);
		if (!rq)
			continue;
3396

3397 3398 3399 3400 3401 3402 3403 3404 3405 3406 3407 3408 3409 3410
		/*
		 * We can't use our normal waiter as we want to
		 * avoid recursively trying to handle the current
		 * reset. The basic dma_fence_default_wait() installs
		 * a callback for dma_fence_signal(), which is
		 * triggered by our nop handler (indirectly, the
		 * callback enables the signaler thread which is
		 * woken by the nop_submit_request() advancing the seqno
		 * and when the seqno passes the fence, the signaler
		 * then signals the fence waking us up).
		 */
		if (dma_fence_default_wait(&rq->fence, true,
					   MAX_SCHEDULE_TIMEOUT) < 0)
			return false;
3411
	}
3412 3413
	i915_retire_requests(i915);
	GEM_BUG_ON(i915->gt.active_requests);
3414

3415 3416
	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
3417 3418 3419 3420 3421 3422 3423 3424
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(i915);
3425
	i915_gem_contexts_lost(i915);
3426

3427 3428
	GEM_TRACE("end\n");

3429 3430 3431 3432 3433 3434
	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &i915->gpu_error.flags);

	return true;
}

3435
static void
3436 3437
i915_gem_retire_work_handler(struct work_struct *work)
{
3438
	struct drm_i915_private *dev_priv =
3439
		container_of(work, typeof(*dev_priv), gt.retire_work.work);
3440
	struct drm_device *dev = &dev_priv->drm;
3441

3442
	/* Come back later if the device is busy... */
3443
	if (mutex_trylock(&dev->struct_mutex)) {
3444
		i915_retire_requests(dev_priv);
3445
		mutex_unlock(&dev->struct_mutex);
3446
	}
3447

3448 3449
	/*
	 * Keep the retire handler running until we are finally idle.
3450 3451 3452
	 * We do not need to do this test under locking as in the worst-case
	 * we queue the retire worker once too often.
	 */
3453
	if (READ_ONCE(dev_priv->gt.awake))
3454 3455
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.retire_work,
3456
				   round_jiffies_up_relative(HZ));
3457
}
3458

3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479 3480 3481 3482 3483 3484 3485 3486 3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502 3503 3504 3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517
static void shrink_caches(struct drm_i915_private *i915)
{
	/*
	 * kmem_cache_shrink() discards empty slabs and reorders partially
	 * filled slabs to prioritise allocating from the mostly full slabs,
	 * with the aim of reducing fragmentation.
	 */
	kmem_cache_shrink(i915->priorities);
	kmem_cache_shrink(i915->dependencies);
	kmem_cache_shrink(i915->requests);
	kmem_cache_shrink(i915->luts);
	kmem_cache_shrink(i915->vmas);
	kmem_cache_shrink(i915->objects);
}

struct sleep_rcu_work {
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
	struct drm_i915_private *i915;
	unsigned int epoch;
};

static inline bool
same_epoch(struct drm_i915_private *i915, unsigned int epoch)
{
	/*
	 * There is a small chance that the epoch wrapped since we started
	 * sleeping. If we assume that epoch is at least a u32, then it will
	 * take at least 2^32 * 100ms for it to wrap, or about 326 years.
	 */
	return epoch == READ_ONCE(i915->gt.epoch);
}

static void __sleep_work(struct work_struct *work)
{
	struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
	struct drm_i915_private *i915 = s->i915;
	unsigned int epoch = s->epoch;

	kfree(s);
	if (same_epoch(i915, epoch))
		shrink_caches(i915);
}

static void __sleep_rcu(struct rcu_head *rcu)
{
	struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
	struct drm_i915_private *i915 = s->i915;

	if (same_epoch(i915, s->epoch)) {
		INIT_WORK(&s->work, __sleep_work);
		queue_work(i915->wq, &s->work);
	} else {
		kfree(s);
	}
}

3518 3519 3520 3521 3522 3523 3524
static inline bool
new_requests_since_last_retire(const struct drm_i915_private *i915)
{
	return (READ_ONCE(i915->gt.active_requests) ||
		work_pending(&i915->gt.idle_work.work));
}

3525 3526 3527 3528 3529 3530 3531 3532 3533 3534 3535 3536 3537 3538 3539 3540
static void assert_kernel_context_is_current(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (i915_terminally_wedged(&i915->gpu_error))
		return;

	GEM_BUG_ON(i915->gt.active_requests);
	for_each_engine(engine, i915, id) {
		GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
		GEM_BUG_ON(engine->last_retired_context !=
			   to_intel_context(i915->kernel_context, engine));
	}
}

static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.idle_work.work);
	unsigned int epoch = I915_EPOCH_INVALID;
	bool rearm_hangcheck;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

	if (READ_ONCE(dev_priv->gt.active_requests))
		return;

	/*
	 * Flush out the last user context, leaving only the pinned
	 * kernel context resident. When we are idling on the kernel_context,
	 * no more new requests (with a context switch) are emitted and we
	 * can finally rest. A consequence is that the idle work handler is
	 * always called at least twice before idling (and if the system is
	 * idle that implies a round trip through the retire worker).
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_switch_to_kernel_context(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
		  READ_ONCE(dev_priv->gt.active_requests));

	/*
	 * Wait for the last execlists context to complete, but bail out in
	 * case a new request is submitted. As we don't trust the hardware,
	 * we continue on if the wait times out. This is necessary to allow
	 * the machine to suspend even if the hardware dies, and we will
	 * try to recover in resume (after depriving the hardware of power,
	 * it may be in a better mood).
	 */
	__wait_for(if (new_requests_since_last_retire(dev_priv)) return,
		   intel_engines_are_idle(dev_priv),
		   I915_IDLE_ENGINES_TIMEOUT * 1000,
		   10, 500);

	rearm_hangcheck =
		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
		/* Currently busy, come back later */
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->gt.idle_work,
				 msecs_to_jiffies(50));
		goto out_rearm;
	}

	/*
	 * New request retired after this work handler started, extend active
	 * period until next instance of the work.
	 */
	if (new_requests_since_last_retire(dev_priv))
		goto out_unlock;

	epoch = __i915_gem_park(dev_priv);

	assert_kernel_context_is_current(dev_priv);

	rearm_hangcheck = false;
out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

out_rearm:
	if (rearm_hangcheck) {
		GEM_BUG_ON(!dev_priv->gt.awake);
		i915_queue_hangcheck(dev_priv);
	}

	/*
	 * When we are idle, it is an opportune time to reap our caches.
	 * However, we have many objects that utilise RCU and the ordered
	 * i915->wq that this work is executing on. To try and flush any
	 * pending frees now we are idle, we first wait for an RCU grace
	 * period, and then queue a task (that will run last on the wq) to
	 * shrink and re-optimize the caches.
	 */
	if (same_epoch(dev_priv, epoch)) {
		struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
		if (s) {
			s->i915 = dev_priv;
			s->epoch = epoch;
			call_rcu(&s->rcu, __sleep_rcu);
		}
	}
}

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(gem->dev);
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_lut_handle *lut, *ln;

	mutex_lock(&i915->drm.struct_mutex);

	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

		GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
		if (ctx->file_priv != fpriv)
			continue;

		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		GEM_BUG_ON(vma->obj != obj);

		/* We allow the process to have multiple handles to the same
		 * vma, in the same fd namespace, by virtue of flink/open.
		 */
		GEM_BUG_ON(!vma->open_count);
		if (!--vma->open_count && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&lut->obj_link);
		list_del(&lut->ctx_link);

		kmem_cache_free(i915->luts, lut);
		__i915_gem_object_release_unless_active(obj);
	}

	mutex_unlock(&i915->drm.struct_mutex);
}

static unsigned long to_wait_timeout(s64 timeout_ns)
{
	if (timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;

	if (timeout_ns == 0)
		return 0;

	return nsecs_to_jiffies_timeout(timeout_ns);
}

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: incomplete, restart syscall
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	ktime_t start;
	long ret;

	if (args->flags != 0)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->bo_handle);
	if (!obj)
		return -ENOENT;

	start = ktime_get();

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
				   to_wait_timeout(args->timeout_ns),
				   to_rps_client(file));

	if (args->timeout_ns > 0) {
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
			args->timeout_ns = 0;

		/* Asked to wait beyond the jiffie/scheduler precision? */
		if (ret == -ETIME && args->timeout_ns)
			ret = -EAGAIN;
	}

	i915_gem_object_put(obj);
	return ret;
}
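
/*
 * Minimal userspace sketch of the wait ioctl above (illustrative only; the
 * libdrm drmIoctl() wrapper, the fd and handle variables and the 20ms budget
 * are assumptions of the example, not part of this file):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.flags = 0,
 *		.timeout_ns = 20 * 1000 * 1000,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *		;	// idle: wait.timeout_ns now holds the unused budget
 *	else if (errno == ETIME)
 *		;	// still busy after the full 20ms
 */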

static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags)
{
	struct i915_request *rq;
	long ret;

	rq = i915_gem_active_get_unlocked(&tl->last_request);
	if (!rq)
		return 0;

	/*
	 * "Race-to-idle".
	 *
	 * Switching to the kernel context is often used as a synchronous
	 * step prior to idling, e.g. in suspend for flushing all
	 * current operations to memory before sleeping. These we
	 * want to complete as quickly as possible to avoid prolonged
	 * stalls, so allow the gpu to boost to maximum clocks.
	 */
	if (flags & I915_WAIT_FOR_IDLE_BOOST)
		gen6_rps_boost(rq, NULL);

	ret = i915_request_wait(rq, flags, MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);

	return ret < 0 ? ret : 0;
}

static int wait_for_engines(struct drm_i915_private *i915)
{
	if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
		dev_err(i915->drm.dev,
			"Failed to idle engines, declaring wedged!\n");
		GEM_TRACE_DUMP();
		i915_gem_set_wedged(i915);
		return -EIO;
	}

	return 0;
}

int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
	GEM_TRACE("flags=%x (%s)\n",
		  flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked");

	/* If the device is asleep, we have no requests outstanding */
	if (!READ_ONCE(i915->gt.awake))
		return 0;

	if (flags & I915_WAIT_LOCKED) {
		struct i915_timeline *tl;
		int err;

		lockdep_assert_held(&i915->drm.struct_mutex);

		list_for_each_entry(tl, &i915->gt.timelines, link) {
			err = wait_for_timeline(tl, flags);
			if (err)
				return err;
		}
		i915_retire_requests(i915);
		GEM_BUG_ON(i915->gt.active_requests);

		return wait_for_engines(i915);
	} else {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;
		int err;

		for_each_engine(engine, i915, id) {
			err = wait_for_timeline(&engine->timeline, flags);
			if (err)
				return err;
		}

		return 0;
	}
}

static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
	/*
	 * We manually flush the CPU domain so that we can override and
	 * force the flush for the display, and perform it asynchronously.
	 */
	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
	if (obj->cache_dirty)
		i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
	obj->write_domain = 0;
}

void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{
	if (!READ_ONCE(obj->pin_global))
		return;

	mutex_lock(&obj->base.dev->struct_mutex);
	__i915_gem_object_flush_for_display(obj);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

/**
 * Moves a single object to the WC read, and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	if (obj->write_domain == I915_GEM_DOMAIN_WC)
		return 0;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * WC domain upon first access.
	 */
	if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
		mb();

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_WC;
	if (write) {
		obj->read_domains = I915_GEM_DOMAIN_WC;
		obj->write_domain = I915_GEM_DOMAIN_WC;
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);
	return 0;
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	if (obj->write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->read_domains = I915_GEM_DOMAIN_GTT;
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);
	return 0;
}

/**
 * Changes the cache-level of an object across all VMA.
 * @obj: object to act on
 * @cache_level: new cache level to set for the object
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (obj->cache_level == cache_level)
		return 0;

	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
restart:
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_pinned(vma)) {
			DRM_DEBUG("can not change the cache level of pinned objects\n");
			return -EBUSY;
		}

		if (!i915_vma_is_closed(vma) &&
		    i915_gem_valid_gtt_space(vma, cache_level))
			continue;

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;

		/* As unbinding may affect other elements in the
		 * obj->vma_list (due to side-effects from retiring
		 * an active vma), play safe and restart the iterator.
		 */
		goto restart;
	}

	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However since
	 * we already have a valid slot, dma mapping, pages etc, we may as
	 * well rewrite the PTE in the belief that doing so tramples upon
	 * less state and so involves less work.
	 */
	if (obj->bind_count) {
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
		 */
		ret = i915_gem_object_wait(obj,
					   I915_WAIT_INTERRUPTIBLE |
					   I915_WAIT_LOCKED |
					   I915_WAIT_ALL,
					   MAX_SCHEDULE_TIMEOUT,
					   NULL);
		if (ret)
			return ret;

		if (!HAS_LLC(to_i915(obj->base.dev)) &&
		    cache_level != I915_CACHE_NONE) {
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmapping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
			i915_gem_release_mmap(obj);

			/* As we no longer need a fence for GTT access,
			 * we can relinquish it now (and so prevent having
			 * to steal a fence from someone else on the next
			 * fence request). Note GPU activity would have
			 * dropped the fence as all snoopable access is
			 * supposed to be linear.
			 */
			for_each_ggtt_vma(vma, obj) {
				ret = i915_vma_put_fence(vma);
				if (ret)
					return ret;
			}
		} else {
			/* We either have incoherent backing store and
			 * so no GTT access or the architecture is fully
			 * coherent. In such cases, existing GTT mmaps
			 * ignore the cache bit in the PTE and we can
			 * rewrite it without confusing the GPU or having
			 * to force userspace to fault back in its mmaps.
			 */
		}

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
			if (ret)
				return ret;
		}
	}

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		vma->node.color = cache_level;
	i915_gem_object_set_cache_coherency(obj, cache_level);
	obj->cache_dirty = true; /* Always invalidate stale cachelines */

	return 0;
}

int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	int err = 0;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj) {
		err = -ENOENT;
		goto out;
	}

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}
out:
	rcu_read_unlock();
	return err;
}

int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret = 0;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
		if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
			return -ENODEV;

		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * The caching mode of proxy object is handled by its generator, and
	 * not allowed to be changed by userspace.
	 */
	if (i915_gem_object_is_proxy(obj)) {
		ret = -ENXIO;
		goto out;
	}

	if (obj->cache_level == level)
		goto out;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		goto out;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	ret = i915_gem_object_set_cache_level(obj, level);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_put(obj);
	return ret;
}
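
/*
 * Minimal userspace sketch of the get/set caching ioctls above (illustrative
 * only; drmIoctl(), fd and handle are assumptions of the example):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_CACHING, &arg);
 *	// arg.caching now reports the level the kernel actually applied
 */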

/*
 * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
 * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
 * (for pageflips). We only flush the caches while preparing the buffer for
 * display, the callers are responsible for frontbuffer flush.
 */
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags)
{
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Mark the global pin early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	obj->pin_global++;

	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(to_i915(obj->base.dev)) ?
					      I915_CACHE_WT : I915_CACHE_NONE);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err_unpin_global;
	}

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers. However,
	 * it may simply be too big to fit into mappable, in which case
	 * put it anyway and hope that userspace can cope (but always first
	 * try to preserve the existing ABI).
	 */
	vma = ERR_PTR(-ENOSPC);
	if ((flags & PIN_MAPPABLE) == 0 &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL))
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
					       flags |
					       PIN_MAPPABLE |
					       PIN_NONBLOCK);
	if (IS_ERR(vma))
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
	if (IS_ERR(vma))
		goto err_unpin_global;

	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

	__i915_gem_object_flush_for_display(obj);

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->read_domains |= I915_GEM_DOMAIN_GTT;

	return vma;

err_unpin_global:
	obj->pin_global--;
	return vma;
}

void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (WARN_ON(vma->obj->pin_global == 0))
		return;

	if (--vma->obj->pin_global == 0)
		vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	/* Bump the LRU to try and avoid premature eviction whilst flipping */
	i915_gem_object_bump_inactive_ggtt(vma->obj);

	i915_vma_unpin(vma);
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 * @obj: object to act on
 * @write: requesting write or read-only access
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write)
		__start_cpu_write(obj);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct i915_request *request, *target = NULL;
	long ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		if (target) {
			list_del(&target->client_link);
			target->file_priv = NULL;
		}

		target = request;
	}
	if (target)
		i915_request_get(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = i915_request_wait(target,
				I915_WAIT_INTERRUPTIBLE,
				MAX_SCHEDULE_TIMEOUT);
	i915_request_put(target);

	return ret < 0 ? ret : 0;
}

struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.vm;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
		/* If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > dev_priv->ggtt.mappable_end)
			return ERR_PTR(-E2BIG);

		/* If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to do a search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

	vma = i915_vma_instance(obj, vm, view);
	if (unlikely(IS_ERR(vma)))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			if (flags & PIN_MAPPABLE &&
			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}

static __always_inline unsigned int __busy_read_flag(unsigned int id)
{
	/* Note that we could alias engines in the execbuf API, but
	 * that would be very unwise as it prevents userspace from
	 * fine control over engine selection. Ahem.
	 *
	 * This should be something like EXEC_MAX_ENGINE instead of
	 * I915_NUM_ENGINES.
	 */
	BUILD_BUG_ON(I915_NUM_ENGINES > 16);
	return 0x10000 << id;
}

static __always_inline unsigned int __busy_write_id(unsigned int id)
{
	/* The uABI guarantees an active writer is also amongst the read
	 * engines. This would be true if we accessed the activity tracking
	 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we can not guarantee that the last_write
	 * being active implies that we have set the same engine flag from
	 * last_read - hence we always set both read and write busy for
	 * last_write.
	 */
	return id | __busy_read_flag(id);
}
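
/*
 * Worked example of the encoding above (illustrative only): an engine with
 * uabi_id 2 contributes a read flag of 0x10000 << 2 == 0x40000, and a write
 * id of 2 | 0x40000 == 0x40002. Userspace can therefore split the busy word
 * returned by the busy ioctl as
 *
 *	writing_engine = busy & 0xffff;		// id of the engine writing
 *	reading_engines = busy >> 16;		// one flag bit per reader
 */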

static __always_inline unsigned int
__busy_set_if_active(const struct dma_fence *fence,
		     unsigned int (*flag)(unsigned int id))
{
	struct i915_request *rq;

	/* We have to check the current hw status of the fence as the uABI
	 * guarantees forward progress. We could rely on the idle worker
	 * to eventually flush us, but to minimise latency just ask the
	 * hardware.
	 *
	 * Note we only report on the status of native fences.
	 */
	if (!dma_fence_is_i915(fence))
		return 0;

	/* opencode to_request() in order to avoid const warnings */
	rq = container_of(fence, struct i915_request, fence);
	if (i915_request_completed(rq))
		return 0;

	return flag(rq->engine->uabi_id);
}

static __always_inline unsigned int
busy_check_reader(const struct dma_fence *fence)
{
	return __busy_set_if_active(fence, __busy_read_flag);
}

static __always_inline unsigned int
busy_check_writer(const struct dma_fence *fence)
{
	if (!fence)
		return 0;

	return __busy_set_if_active(fence, __busy_write_id);
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	struct reservation_object_list *list;
	unsigned int seq;
	int err;

	err = -ENOENT;
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj)
		goto out;

	/* A discrepancy here is that we do not report the status of
	 * non-i915 fences, i.e. even though we may report the object as idle,
	 * a call to set-domain may still stall waiting for foreign rendering.
	 * This also means that wait-ioctl may report an object as busy,
	 * where busy-ioctl considers it idle.
	 *
	 * We trade the ability to warn of foreign fences to report on which
	 * i915 engines are active for the object.
	 *
	 * Alternatively, we can trade that extra information on read/write
	 * activity with
	 *	args->busy =
	 *		!reservation_object_test_signaled_rcu(obj->resv, true);
	 * to report the overall busyness. This is what the wait-ioctl does.
	 *
	 */
retry:
	seq = raw_read_seqcount(&obj->resv->seq);

	/* Translate the exclusive fence to the READ *and* WRITE engine */
	args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));

	/* Translate shared fences to READ set of engines */
	list = rcu_dereference(obj->resv->fence);
	if (list) {
		unsigned int shared_count = list->shared_count, i;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence =
				rcu_dereference(list->shared[i]);

			args->busy |= busy_check_reader(fence);
		}
	}

	if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
		goto retry;

	err = 0;
out:
	rcu_read_unlock();
	return err;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	mutex_unlock(&obj->mm.lock);

out:
	i915_gem_object_put(obj);
	return err;
}
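
/*
 * Minimal userspace sketch of the madvise ioctl above (illustrative only;
 * drmIoctl(), fd and handle are assumptions of the example):
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	// madv.retained == 0 means the backing storage was already purged
 */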

static void
frontbuffer_retire(struct i915_gem_active *active, struct i915_request *request)
{
	struct drm_i915_gem_object *obj =
		container_of(active, typeof(*obj), frontbuffer_write);

	intel_fb_obj_flush(obj, ORIGIN_CS);
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	mutex_init(&obj->mm.lock);

	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->lut_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	obj->ops = ops;

	reservation_object_init(&obj->__builtin_resv);
	obj->resv = &obj->__builtin_resv;

	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
	init_request_active(&obj->frontbuffer_write, frontbuffer_retire);

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);

	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,

	.pwrite = i915_gem_object_pwrite_gtt,
};

static int i915_gem_object_create_shmem(struct drm_device *dev,
					struct drm_gem_object *obj,
					size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);

	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	/* There is a prevalence of the assumption that we fit the object's
	 * page count inside a 32bit _signed_ variable. Let's document this and
	 * catch if we ever need to fix it. In the meantime, if you do spot
	 * such a local variable, please consider fixing!
	 */
	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev_priv))
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}
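
/*
 * Typical in-kernel usage of the constructor above (illustrative only; the
 * caller and its error path are assumptions of the example):
 *
 *	struct drm_i915_gem_object *obj;
 *
 *	obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	i915_gem_object_put(obj);
 */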

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	intel_runtime_pm_get(i915);
	llist_for_each_entry_safe(obj, on, freed, freed) {
		struct i915_vma *vma, *vn;

		trace_i915_gem_object_destroy(obj);

		mutex_lock(&i915->drm.struct_mutex);

		GEM_BUG_ON(i915_gem_object_is_active(obj));
		list_for_each_entry_safe(vma, vn,
					 &obj->vma_list, obj_link) {
			GEM_BUG_ON(i915_vma_is_active(vma));
			vma->flags &= ~I915_VMA_PIN_MASK;
			i915_vma_destroy(vma);
		}
		GEM_BUG_ON(!list_empty(&obj->vma_list));
		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));

		/* This serializes freeing with the shrinker. Since the free
		 * is delayed, first by RCU then by the workqueue, we want the
		 * shrinker to be able to free pages of unreferenced objects,
		 * or else we may oom whilst there are plenty of deferred
		 * freed objects.
		 */
		if (i915_gem_object_has_pages(obj)) {
			spin_lock(&i915->mm.obj_lock);
			list_del_init(&obj->mm.link);
			spin_unlock(&i915->mm.obj_lock);
		}

		mutex_unlock(&i915->drm.struct_mutex);

		GEM_BUG_ON(obj->bind_count);
		GEM_BUG_ON(obj->userfault_count);
		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
		GEM_BUG_ON(!list_empty(&obj->lut_list));

		if (obj->ops->release)
			obj->ops->release(obj);

		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
			atomic_set(&obj->mm.pages_pin_count, 0);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		GEM_BUG_ON(i915_gem_object_has_pages(obj));

		if (obj->base.import_attach)
			drm_prime_gem_destroy(&obj->base, NULL);

		reservation_object_fini(&obj->__builtin_resv);
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(i915, obj->base.size);

		kfree(obj->bit_17);
		i915_gem_object_free(obj);

		GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
		atomic_dec(&i915->mm.free_count);

		if (on)
			cond_resched();
	}
	intel_runtime_pm_put(i915);
}

static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed;

	/* Free the oldest, most stale object to keep the free_list short */
	freed = NULL;
	if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
		/* Only one consumer of llist_del_first() allowed */
		spin_lock(&i915->mm.free_lock);
		freed = llist_del_first(&i915->mm.free_list);
		spin_unlock(&i915->mm.free_lock);
	}
	if (unlikely(freed)) {
		freed->next = NULL;
		__i915_gem_free_objects(i915, freed);
	}
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);
	struct llist_node *freed;

	/*
	 * All file-owned VMA should have been released by this point through
	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * unbound now.
	 */

	spin_lock(&i915->mm.free_lock);
	while ((freed = llist_del_all(&i915->mm.free_list))) {
		spin_unlock(&i915->mm.free_lock);

		__i915_gem_free_objects(i915, freed);
		if (need_resched())
			return;

		spin_lock(&i915->mm.free_lock);
	}
	spin_unlock(&i915->mm.free_lock);
}

static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/*
	 * Since we require blocking on struct_mutex to unbind the freed
	 * object from the GPU before releasing resources back to the
	 * system, we can not do that directly from the RCU callback (which may
	 * be a softirq context), but must instead then defer that work onto a
	 * kthread. We use the RCU callback rather than move the freed object
	 * directly onto the work queue so that we can mix between using the
	 * worker and performing frees directly from subsequent allocations for
	 * crude but effective memory throttling.
4953 4954
	 */
	if (llist_add(&obj->freed, &i915->mm.free_list))
4955
		queue_work(i915->wq, &i915->mm.free_work);
4956
}
4957

4958 4959 4960
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
C
4962 4963 4964
	if (obj->mm.quirked)
		__i915_gem_object_unpin_pages(obj);

	if (discard_backing_storage(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/*
	 * Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!i915_gem_object_has_active_reference(obj) &&
	    i915_gem_object_is_active(obj))
		i915_gem_object_set_active_reference(obj);
	else
		i915_gem_object_put(obj);
}

void i915_gem_sanitize(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	GEM_TRACE("\n");

	mutex_lock(&i915->drm.struct_mutex);

	intel_runtime_pm_get(i915);
	intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
	 * back to defaults, recovering from whatever wedged state we left it
	 * in and so worth trying to use the device once more.
	 */
	if (i915_terminally_wedged(&i915->gpu_error))
		i915_gem_unset_wedged(i915);

	/*
	 * If we inherit context state from the BIOS or earlier occupants
	 * of the GPU, the GPU may be in an inconsistent state when we
	 * try to take over. The only way to remove the earlier state
	 * is by resetting. However, resetting on earlier gen is tricky as
	 * it may impact the display and we are uncertain about the stability
	 * of the reset, so this could be applied to even earlier gen.
	 */
	if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
		WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));

	/* Reset the submission backend after resume as well as the GPU reset */
	for_each_engine(engine, i915, id) {
		if (engine->reset.reset)
			engine->reset.reset(engine, NULL);
	}

	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
	intel_runtime_pm_put(i915);

	i915_gem_contexts_lost(i915);
	mutex_unlock(&i915->drm.struct_mutex);
}

int i915_gem_suspend(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	GEM_TRACE("\n");

	intel_runtime_pm_get(dev_priv);
	intel_suspend_gt_powersave(dev_priv);

	mutex_lock(&dev->struct_mutex);

	/* We have to flush all the executing contexts to main memory so
	 * that they can saved in the hibernation image. To ensure the last
	 * context image is coherent, we have to switch away from it. That
	 * leaves the dev_priv->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
		ret = i915_gem_switch_to_kernel_context(dev_priv);
		if (ret)
			goto err_unlock;

		ret = i915_gem_wait_for_idle(dev_priv,
					     I915_WAIT_INTERRUPTIBLE |
					     I915_WAIT_LOCKED |
					     I915_WAIT_FOR_IDLE_BOOST);
		if (ret && ret != -EIO)
			goto err_unlock;

		assert_kernel_context_is_current(dev_priv);
	}
	mutex_unlock(&dev->struct_mutex);

	intel_uc_suspend(dev_priv);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);

	/* As the idle_work is rearming if it detects a race, play safe and
	 * repeat the flush until it is definitely idle.
	 */
	drain_delayed_work(&dev_priv->gt.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->gt.awake);
	if (WARN_ON(!intel_engines_are_idle(dev_priv)))
		i915_gem_set_wedged(dev_priv); /* no hope, discard everything */

	intel_runtime_pm_put(dev_priv);
	return 0;

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	intel_runtime_pm_put(dev_priv);
	return ret;
}

void i915_gem_suspend_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.unbound_list,
		&i915->mm.bound_list,
		NULL
	}, **phase;

	/*
	 * Neither the BIOS, ourselves or any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */

	mutex_lock(&i915->drm.struct_mutex);
	for (phase = phases; *phase; phase++) {
		list_for_each_entry(obj, *phase, mm.link)
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
	}
	mutex_unlock(&i915->drm.struct_mutex);

	intel_uc_sanitize(i915);
	i915_gem_sanitize(i915);
}

void i915_gem_resume(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	WARN_ON(i915->gt.awake);

	mutex_lock(&i915->drm.struct_mutex);
	intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);

	i915_gem_restore_gtt_mappings(i915);
	i915_gem_restore_fences(i915);

	/*
	 * As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	i915->gt.resume(i915);

	if (i915_gem_init_hw(i915))
		goto err_wedged;

	intel_uc_resume(i915);

	/* Always reload a context for powersaving. */
	if (i915_gem_switch_to_kernel_context(i915))
		goto err_wedged;

out_unlock:
	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
	mutex_unlock(&i915->drm.struct_mutex);
	return;

err_wedged:
	if (!i915_terminally_wedged(&i915->gpu_error)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		i915_gem_set_wedged(i915);
	}
	goto out_unlock;
}

void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev_priv))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev_priv))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev_priv))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev_priv))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}

static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
{
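	/* Clearing CTL/HEAD/TAIL/START leaves the ring disabled and empty. */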
	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
		init_unused_ring(dev_priv, SRB2_BASE);
		init_unused_ring(dev_priv, SRB3_BASE);
	} else if (IS_GEN2(dev_priv)) {
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
	} else if (IS_GEN3(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, PRB2_BASE);
	}
}

static int __i915_gem_restart_engines(void *data)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

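	/* Re-run the per-engine hardware setup; any failure aborts the restart. */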
	for_each_engine(engine, i915, id) {
		err = engine->init_hw(engine);
		if (err) {
			DRM_ERROR("Failed to restart %s (%d)\n",
				  engine->name, err);
			return err;
		}
	}

	return 0;
}

int i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
	int ret;

	dev_priv->gt.last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev_priv))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev_priv)) {
		if (IS_IVYBRIDGE(dev_priv)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_GEN(dev_priv) >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	intel_gt_workarounds_apply(dev_priv);

	i915_gem_init_swizzling(dev_priv);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev_priv);

	BUG_ON(!dev_priv->kernel_context);
	if (i915_terminally_wedged(&dev_priv->gpu_error)) {
		ret = -EIO;
		goto out;
	}

	ret = i915_ppgtt_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	ret = intel_wopcm_init_hw(&dev_priv->wopcm);
	if (ret) {
		DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init_l3cc_table(dev_priv);

	/* Only when the HW is re-initialised can we replay the requests */
	ret = __i915_gem_restart_engines(dev_priv);
	if (ret)
		goto cleanup_uc;
out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;

cleanup_uc:
	intel_uc_fini_hw(dev_priv);
	goto out;
}

static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * As we reset the GPU during very early sanitisation, the current
	 * register state on the GPU should reflect its default values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	ctx = i915_gem_context_create_kernel(i915, 0);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = i915_request_alloc(engine, ctx);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_ctx;
		}

		err = 0;
		if (engine->init_context)
			err = engine->init_context(rq);

		i915_request_add(rq);
		if (err)
			goto err_active;
	}

	err = i915_gem_switch_to_kernel_context(i915);
	if (err)
		goto err_active;

	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
	if (err)
		goto err_active;

	assert_kernel_context_is_current(i915);

	for_each_engine(engine, i915, id) {
		struct i915_vma *state;

		state = to_intel_context(ctx, engine)->state;
		if (!state)
			continue;

		/*
		 * As we will hold a reference to the logical state, it will
		 * not be torn down with the context, and importantly the
		 * object will hold onto its vma (making it possible for a
		 * stray GTT write to corrupt our defaults). Unmap the vma
		 * from the GTT to prevent such accidents and reclaim the
		 * space.
		 */
		err = i915_vma_unbind(state);
		if (err)
			goto err_active;

		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
		if (err)
			goto err_active;

		engine->default_state = i915_gem_object_get(state->obj);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		unsigned int found = intel_engines_has_context_isolation(i915);

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		for_each_engine(engine, i915, id) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((found & bit) != expected) {
				DRM_ERROR("mismatching default context state for class %d on engine %s\n",
					  engine->uabi_class, engine->name);
			}
		}
	}

out_ctx:
	i915_gem_context_set_closed(ctx);
	i915_gem_context_put(ctx);
	return err;

err_active:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. First try to flush any remaining
	 * request, ensure we are pointing at the kernel context and
	 * then remove it.
	 */
	if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
		goto out_ctx;

	if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
		goto out_ctx;

	i915_gem_contexts_lost(i915);
	goto out_ctx;
}

int i915_gem_init(struct drm_i915_private *dev_priv)
{
	int ret;

	/* Fall back to 4K pages if the host doesn't support a huge GTT. */
	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		mkwrite_device_info(dev_priv)->page_sizes =
			I915_GTT_PAGE_SIZE_4K;

	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);

	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		dev_priv->gt.resume = intel_lr_context_resume;
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
	} else {
		dev_priv->gt.resume = intel_legacy_submission_resume;
		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
	}

	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		return ret;

	ret = intel_wopcm_init(&dev_priv->wopcm);
	if (ret)
		return ret;

	ret = intel_uc_init_misc(dev_priv);
	if (ret)
		return ret;

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	ret = i915_gem_contexts_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_ggtt;
	}

	ret = intel_engines_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_context;
	}

	intel_init_gt_powersave(dev_priv);

	ret = intel_uc_init(dev_priv);
	if (ret)
		goto err_pm;

	ret = i915_gem_init_hw(dev_priv);
	if (ret)
		goto err_uc_init;

	/*
	 * Despite its name intel_init_clock_gating applies both display
	 * clock gating workarounds; GT mmio workarounds and the occasional
	 * GT power context workaround. Worse, sometimes it includes a context
	 * register workaround which we need to apply before we record the
	 * default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_init_clock_gating(dev_priv);

	ret = __intel_engines_record_defaults(dev_priv);
	if (ret)
		goto err_init_hw;

	if (i915_inject_load_failure()) {
		ret = -ENODEV;
		goto err_init_hw;
	}

	if (i915_inject_load_failure()) {
		ret = -EIO;
		goto err_init_hw;
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;

	/*
	 * Unwinding is complicated by that we want to handle -EIO to mean
	 * disable GPU submission but keep KMS alive. We want to mark the
	 * HW as irreversibly wedged, but keep enough state around that the
	 * driver doesn't explode during runtime.
	 */
err_init_hw:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	WARN_ON(i915_gem_suspend(dev_priv));
	i915_gem_suspend_late(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
err_uc_init:
	intel_uc_fini(dev_priv);
err_pm:
	if (ret != -EIO) {
		intel_cleanup_gt_powersave(dev_priv);
		i915_gem_cleanup_engines(dev_priv);
	}
err_context:
	if (ret != -EIO)
		i915_gem_contexts_fini(dev_priv);
err_ggtt:
err_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_uc_fini_misc(dev_priv);

	if (ret != -EIO)
		i915_gem_cleanup_userptr(dev_priv);

	if (ret == -EIO) {
		/*
		 * Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failures, such as an allocation failure, bail.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			i915_load_error(dev_priv,
					"Failed to initialize GPU, declaring it wedged!\n");
			i915_gem_set_wedged(dev_priv);
		}
		ret = 0;
	}

	i915_gem_drain_freed_objects(dev_priv);
	return ret;
}

void i915_gem_fini(struct drm_i915_private *dev_priv)
{
	i915_gem_suspend_late(dev_priv);

	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
	intel_uc_fini(dev_priv);
	i915_gem_cleanup_engines(dev_priv);
	i915_gem_contexts_fini(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_uc_fini_misc(dev_priv);
	i915_gem_cleanup_userptr(dev_priv);

	i915_gem_drain_freed_objects(dev_priv);

	WARN_ON(!list_empty(&dev_priv->contexts.list));
}

void i915_gem_init_mmio(struct drm_i915_private *i915)
{
	i915_gem_sanitize(i915);
}

void
i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		dev_priv->gt.cleanup_engine(engine);
}

void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	int i;

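	/*
	 * Pick the number of fence registers for this platform: 32 on gen7+
	 * (except VLV/CHV), 16 on gen4+ and the later gen3 parts, 8 otherwise.
	 * When running as a vGPU, use the count exposed by the host instead.
	 */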
	if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_GEN(dev_priv) >= 4 ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

		fence->i915 = dev_priv;
		fence->id = i;
		list_add_tail(&fence->link, &dev_priv->mm.fence_list);
	}

	i915_gem_restore_fences(dev_priv);

	i915_gem_detect_bit_6_swizzle(dev_priv);
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mm.object_stat_lock);
	spin_lock_init(&i915->mm.obj_lock);
	spin_lock_init(&i915->mm.free_lock);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.unbound_list);
	INIT_LIST_HEAD(&i915->mm.bound_list);
	INIT_LIST_HEAD(&i915->mm.fence_list);
	INIT_LIST_HEAD(&i915->mm.userfault_list);

	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
	int err = -ENOMEM;

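	/* Slab caches for the frequently allocated GEM tracking structures. */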
	dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->objects)
		goto err_out;

	dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->vmas)
		goto err_objects;

	dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!dev_priv->luts)
		goto err_vmas;

	dev_priv->requests = KMEM_CACHE(i915_request,
					SLAB_HWCACHE_ALIGN |
					SLAB_RECLAIM_ACCOUNT |
					SLAB_TYPESAFE_BY_RCU);
	if (!dev_priv->requests)
		goto err_luts;

	dev_priv->dependencies = KMEM_CACHE(i915_dependency,
					    SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT);
	if (!dev_priv->dependencies)
		goto err_requests;

	dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->priorities)
		goto err_dependencies;

	INIT_LIST_HEAD(&dev_priv->gt.timelines);
	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
	INIT_LIST_HEAD(&dev_priv->gt.closed_vma);

	i915_gem_init__mm(dev_priv);

	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

	spin_lock_init(&dev_priv->fb_tracking.lock);

	err = i915_gemfs_init(dev_priv);
	if (err)
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);

	return 0;

err_dependencies:
	kmem_cache_destroy(dev_priv->dependencies);
err_requests:
	kmem_cache_destroy(dev_priv->requests);
err_luts:
	kmem_cache_destroy(dev_priv->luts);
err_vmas:
	kmem_cache_destroy(dev_priv->vmas);
err_objects:
	kmem_cache_destroy(dev_priv->objects);
err_out:
	return err;
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_freed_objects(dev_priv);
	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
	WARN_ON(dev_priv->mm.object_count);
	WARN_ON(!list_empty(&dev_priv->gt.timelines));

	kmem_cache_destroy(dev_priv->priorities);
	kmem_cache_destroy(dev_priv->dependencies);
	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->luts);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();

	i915_gemfs_fini(dev_priv);
}

int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	/* Discard all purgeable objects, let userspace recover those as
	 * required after resuming.
	 */
	i915_gem_shrink_all(dev_priv);

	return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.unbound_list,
		&i915->mm.bound_list,
		NULL
	}, **phase;

	/*
	 * Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well, see i915_gem_freeze()
	 */

	i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
	i915_gem_drain_freed_objects(i915);

	mutex_lock(&i915->drm.struct_mutex);
	for (phase = phases; *phase; phase++) {
		list_for_each_entry(obj, *phase, mm.link)
			WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
	}
	mutex_unlock(&i915->drm.struct_mutex);

	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_link)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = i915;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;
	file_priv->hang_timestamp = jiffies;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask is guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     sizeof(atomic_t) * BITS_PER_BYTE);

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
				 const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	size_t offset;
	int err;

	obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

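	/*
	 * Copy the supplied data into the object's shmemfs backing store one
	 * page at a time via the pagecache write helpers, so each page ends
	 * up correctly uptodate and dirty.
	 */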
	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		unsigned long exception, i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		exception =
			RADIX_TREE_EXCEPTIONAL_ENTRY |
			idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i,
						(void *)exception);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radixtree will contain an exceptional entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(radix_tree_exception(sg))) {
		unsigned long base =
			(unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

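	/*
	 * Only an ordinary shmem-backed object that is still WILLNEED,
	 * not quirk-pinned and not currently mapped can be converted to a
	 * phys object; reject anything else before swapping the ops.
	 */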
	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->ops != &i915_gem_object_ops)
		return -EINVAL;

	err = i915_gem_object_unbind(obj);
	if (err)
		return err;

	mutex_lock(&obj->mm.lock);

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

	pages = __i915_gem_object_unset_pages(obj);

	obj->ops = &i915_gem_phys_ops;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_object_ops.put_pages(obj, pages);
	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	obj->ops = &i915_gem_object_ops;
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_page_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/scatterlist.c"
#include "selftests/mock_gem_device.c"
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif