/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include "intel_workarounds.h"
#include "i915_gemfs.h"
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_flush_free_objects(struct drm_i915_private *i915);

static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	return obj->pin_global; /* currently in use by HW, keep flushed */
}

static int
insert_mappable_node(struct i915_ggtt *ggtt,
                     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	might_sleep();

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_backoff(error),
					       I915_RESET_TIMEOUT);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}

static u32 __i915_gem_park(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(i915->gt.active_requests);
	GEM_BUG_ON(!list_empty(&i915->gt.active_rings));

	if (!i915->gt.awake)
		return I915_EPOCH_INVALID;

	GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);

	/*
	 * Be paranoid and flush a concurrent interrupt to make sure
	 * we don't reactivate any irq tasklets after parking.
	 *
	 * FIXME: Note that even though we have waited for execlists to be idle,
	 * there may still be an in-flight interrupt even though the CSB
	 * is now empty. synchronize_irq() makes sure that a residual interrupt
	 * is completed before we continue, but it doesn't prevent the HW from
	 * raising a spurious interrupt later. To complete the shield we should
	 * coordinate disabling the CS irq with flushing the interrupts.
	 */
	synchronize_irq(i915->drm.irq);

	intel_engines_park(i915);
	i915_timelines_park(i915);

	i915_pmu_gt_parked(i915);
	i915_vma_parked(i915);

	i915->gt.awake = false;

	if (INTEL_GEN(i915) >= 6)
		gen6_rps_idle(i915);

	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ);

	intel_runtime_pm_put(i915);

	return i915->gt.epoch;
}

void i915_gem_park(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(i915->gt.active_requests);

	if (!i915->gt.awake)
		return;

	/* Defer the actual call to __i915_gem_park() to prevent ping-pongs */
	mod_delayed_work(i915->wq, &i915->gt.idle_work, msecs_to_jiffies(100));
}

void i915_gem_unpark(struct drm_i915_private *i915)
{
	GEM_TRACE("\n");

	lockdep_assert_held(&i915->drm.struct_mutex);
	GEM_BUG_ON(!i915->gt.active_requests);

	if (i915->gt.awake)
		return;

	intel_runtime_pm_get_noresume(i915);

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has negative impact on the performance of the chip with
	 * huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);

	i915->gt.awake = true;
	if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
		i915->gt.epoch = 1;

	intel_enable_gt_powersave(i915);
	i915_update_gfx_val(i915);
	if (INTEL_GEN(i915) >= 6)
		gen6_rps_busy(i915);
	i915_pmu_gt_unparked(i915);

	intel_engines_unpark(i915);

	i915_queue_hangcheck(i915);

	queue_delayed_work(i915->wq,
			   &i915->gt.retire_work,
			   round_jiffies_up_relative(HZ));
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	pinned = ggtt->base.reserved;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}
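
/*
 * Illustrative userspace sketch (not part of this file): the ioctl above is
 * normally reached through DRM_IOCTL_I915_GEM_GET_APERTURE, e.g.
 *
 *	struct drm_i915_gem_get_aperture aperture = {};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture) == 0)
 *		printf("aperture: %llu total, %llu available\n",
 *		       (unsigned long long)aperture.aper_size,
 *		       (unsigned long long)aperture.aper_available_size);
 *
 * drmIoctl() is assumed to come from libdrm; error handling is omitted.
 */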

static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	drm_dma_handle_t *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int i;
	int err;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/* Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     roundup_pow_of_two(obj->base.size),
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return -ENOMEM;

	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		err = -ENOMEM;
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		err = -ENOMEM;
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_phys:
	drm_pci_free(obj->base.dev, phys);

	return err;
}

static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

static const struct drm_i915_gem_object_ops i915_gem_object_ops;

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, false);
	if (ret)
		return ret;

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout,
			   struct intel_rps_client *rps_client)
{
	struct i915_request *rq;

	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
	if (i915_request_completed(rq))
		goto out;

	/*
	 * This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (rps_client && !i915_request_started(rq)) {
		if (INTEL_GEN(rq->i915) >= 6)
			gen6_rps_boost(rq, rps_client);
	}

	timeout = i915_request_wait(rq, flags, timeout);

out:
	if (flags & I915_WAIT_LOCKED && i915_request_completed(rq))
		i915_request_retire_upto(rq);

	return timeout;
}

static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout,
				 struct intel_rps_client *rps_client)
{
	unsigned int seq = __read_seqcount_begin(&resv->seq);
	struct dma_fence *excl;
	bool prune_fences = false;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout,
							     rps_client);
			if (timeout < 0)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);

		/*
		 * If both shared fences and an exclusive fence exist,
		 * then by construction the shared fences must be later
		 * than the exclusive fence. If we successfully wait for
		 * all the shared fences, we know that the exclusive fence
		 * must all be signaled. If all the shared fences are
		 * signaled, we can prune the array and recover the
		 * floating references on the fences/requests.
		 */
		prune_fences = count && timeout >= 0;
	} else {
		excl = reservation_object_get_excl_rcu(resv);
	}

	if (excl && timeout >= 0)
		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
						     rps_client);

	dma_fence_put(excl);

	/*
	 * Opportunistically prune the fences iff we know they have *all* been
	 * signaled and that the reservation object has not been changed (i.e.
	 * no new fences have been added).
	 */
	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
		if (reservation_object_trylock(resv)) {
			if (!__read_seqcount_retry(&resv->seq, seq))
				reservation_object_add_excl_fence(resv, NULL);
			reservation_object_unlock(resv);
		}
	}

	return timeout;
}

static void __fence_set_priority(struct dma_fence *fence,
				 const struct i915_sched_attr *attr)
{
	struct i915_request *rq;
	struct intel_engine_cs *engine;

	if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;

	local_bh_disable();
	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	if (engine->schedule)
		engine->schedule(rq, attr);
	rcu_read_unlock();
	local_bh_enable(); /* kick the tasklets if queues were reprioritised */
}

static void fence_set_priority(struct dma_fence *fence,
			       const struct i915_sched_attr *attr)
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			__fence_set_priority(array->fences[i], attr);
	} else {
		__fence_set_priority(fence, attr);
	}
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      const struct i915_sched_attr *attr)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			fence_set_priority(shared[i], attr);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		fence_set_priority(excl, attr);
		dma_fence_put(excl);
	}
	return 0;
}

/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps_client: client (user process) to charge for any waitboosting
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout,
		     struct intel_rps_client *rps_client)
{
	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->resv,
						   flags, timeout,
						   rps_client);
	return timeout < 0 ? timeout : 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps_client;
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}

void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
{
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       args->size, &args->handle);
}
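
/*
 * Worked example (illustrative only): for a 1920x1080 dumb buffer at 32 bpp,
 * DIV_ROUND_UP(32, 8) = 4 bytes per pixel, so pitch = ALIGN(1920 * 4, 64) =
 * 7680 bytes and size = 7680 * 1080 = 8294400 bytes. i915_gem_create() then
 * rounds the size up to PAGE_SIZE, a no-op here since 8294400 is already
 * page-aligned.
 */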

static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
768
	struct drm_i915_private *dev_priv = to_i915(dev);
769
	struct drm_i915_gem_create *args = data;
770

771
	i915_gem_flush_free_objects(dev_priv);
772

773
	return i915_gem_create(file, dev_priv,
774
			       args->size, &args->handle);
775 776
}

static inline enum fb_op_origin
fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
{
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}

784
void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
785
{
786 787 788 789 790
	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
791 792 793 794 795 796 797 798 799 800
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
801 802
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
803
	 */
804

805 806
	wmb();

807 808 809 810 811 812 813 814 815 816 817 818 819 820 821
	intel_runtime_pm_get(dev_priv);
	spin_lock_irq(&dev_priv->uncore.lock);

	POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));

	spin_unlock_irq(&dev_priv->uncore.lock);
	intel_runtime_pm_put(dev_priv);
}

static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_vma *vma;

822
	if (!(obj->write_domain & flush_domains))
823 824
		return;

825
	switch (obj->write_domain) {
826
	case I915_GEM_DOMAIN_GTT:
827
		i915_gem_flush_ggtt_writes(dev_priv);
828 829 830

		intel_fb_obj_flush(obj,
				   fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
831

832
		for_each_ggtt_vma(vma, obj) {
833 834 835 836 837
			if (vma->iomap)
				continue;

			i915_vma_unset_ggtt_write(vma);
		}
838 839 840 841 842
		break;

	case I915_GEM_DOMAIN_CPU:
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		break;
843 844 845 846 847

	case I915_GEM_DOMAIN_RENDER:
		if (gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
		break;
848 849
	}

850
	obj->write_domain = 0;
851 852
}

853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

879
static inline int
880 881
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

905 906 907 908 909 910
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
911
				    unsigned int *needs_clflush)
912 913 914
{
	int ret;

915
	lockdep_assert_held(&obj->base.dev->struct_mutex);
916

917
	*needs_clflush = 0;
918 919
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;
920

921 922 923 924 925
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
926 927 928
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
930 931 932
	if (ret)
		return ret;

933 934
	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
935 936 937 938 939 940 941
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

942
	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
943

944 945 946 947 948
	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
949
	if (!obj->cache_dirty &&
950
	    !(obj->read_domains & I915_GEM_DOMAIN_CPU))
951
		*needs_clflush = CLFLUSH_BEFORE;
952

953
out:
954
	/* return with the pages pinned */
955
	return 0;
956 957 958 959

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
960 961 962 963 964 965 966
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

967 968
	lockdep_assert_held(&obj->base.dev->struct_mutex);

969 970 971 972
	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

973 974 975 976 977 978
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
979 980 981
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
983 984 985
	if (ret)
		return ret;

986 987
	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
988 989 990 991 992 993 994
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

995
	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
996

997 998 999 1000 1001
	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
1002
	if (!obj->cache_dirty) {
1003
		*needs_clflush |= CLFLUSH_AFTER;
1004

1005 1006 1007 1008
		/*
		 * Same trick applies to invalidate partially written
		 * cachelines read before writing.
		 */
1009
		if (!(obj->read_domains & I915_GEM_DOMAIN_CPU))
1010 1011
			*needs_clflush |= CLFLUSH_BEFORE;
	}
1012

1013
out:
1014
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->mm.dirty = true;
1016
	/* return with the pages pinned */
1017
	return 0;
1018 1019 1020 1021

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
1022 1023
}

1024 1025 1026 1027
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
1028
	if (unlikely(swizzled)) {
1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

1046 1047 1048
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
1049
shmem_pread_slow(struct page *page, int offset, int length,
1050 1051 1052 1053 1054 1055 1056 1057
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
1058
		shmem_clflush_swizzled_range(vaddr + offset, length,
1059
					     page_do_bit17_swizzling);
1060 1061

	if (page_do_bit17_swizzling)
1062
		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
1063
	else
1064
		ret = __copy_to_user(user_data, vaddr + offset, length);
1065 1066
	kunmap(page);

1067
	return ret ? - EFAULT : 0;
1068 1069
}

1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145
static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
	    bool page_do_bit17_swizzling, bool needs_clflush)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush)
			drm_clflush_virt_range(vaddr + offset, length);
		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return 0;

	return shmem_pread_slow(page, offset, length, user_data,
				page_do_bit17_swizzling, needs_clflush);
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	char __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	mutex_unlock(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pread(page, offset, length, user_data,
				  page_to_phys(page) & obj_do_bit17_swizzling,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
1146
{
1147
	void __iomem *vaddr;
1148
	unsigned long unwritten;
1149 1150

	/* We can use the cpu mem copy function because this is X86. */
1151 1152 1153 1154
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
1155 1156
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
1157 1158 1159 1160
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
1161 1162
		io_mapping_unmap(vaddr);
	}
1163 1164 1165 1166
	return unwritten;
}

static int
1167 1168
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
1169
{
1170 1171
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
1172
	struct drm_mm_node node;
1173 1174 1175
	struct i915_vma *vma;
	void __user *user_data;
	u64 remain, offset;
1176 1177
	int ret;

1178 1179 1180 1181 1182 1183
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1184 1185 1186
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
1187 1188 1189
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
1190
		ret = i915_vma_put_fence(vma);
1191 1192 1193 1194 1195
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
1197
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1198
		if (ret)
1199 1200
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
1201 1202 1203 1204 1205 1206
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

1207
	mutex_unlock(&i915->drm.struct_mutex);
1208

1209 1210 1211
	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;
1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1228
					       node.start, I915_CACHE_NONE, 0);
1229 1230 1231 1232
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}
1233

1234
		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
1235
				  user_data, page_length)) {
1236 1237 1238 1239 1240 1241 1242 1243 1244
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

1245
	mutex_lock(&i915->drm.struct_mutex);
1246 1247 1248 1249
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
1250
				       node.start, node.size);
1251 1252
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
1254
	}
1255 1256 1257
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
1258

1259 1260 1261
	return ret;
}

1262 1263
/**
 * Reads data from the object referenced by handle.
1264 1265 1266
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
1267 1268 1269 1270 1271
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1272
		     struct drm_file *file)
1273 1274
{
	struct drm_i915_gem_pread *args = data;
1275
	struct drm_i915_gem_object *obj;
1276
	int ret;
1277

1278 1279 1280 1281
	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
1282
		       u64_to_user_ptr(args->data_ptr),
1283 1284 1285
		       args->size))
		return -EFAULT;

1286
	obj = i915_gem_object_lookup(file, args->handle);
1287 1288
	if (!obj)
		return -ENOENT;
1289

1290
	/* Bounds check source.  */
1291
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
1293
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

1298 1299 1300 1301
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1302
	if (ret)
1303
		goto out;
1304

1305
	ret = i915_gem_object_pin_pages(obj);
1306
	if (ret)
1307
		goto out;
1308

1309
	ret = i915_gem_shmem_pread(obj, args);
1310
	if (ret == -EFAULT || ret == -ENODEV)
1311
		ret = i915_gem_gtt_pread(obj, args);
1312

1313 1314
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
1316
	return ret;
1317 1318
}

1319 1320
/* This is the fast write path which cannot handle
 * page faults in the source data
1321
 */
1322

1323 1324 1325 1326
static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
1327
{
1328
	void __iomem *vaddr;
1329
	unsigned long unwritten;
1330

1331
	/* We can use the cpu mem copy function because this is X86. */
1332 1333
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
1334
						      user_data, length);
1335 1336
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
1337 1338 1339
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
1340 1341
		io_mapping_unmap(vaddr);
	}
1342 1343 1344 1345

	return unwritten;
}

1346 1347 1348
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
1349
 * @obj: i915 GEM object
1350
 * @args: pwrite arguments structure
1351
 */
1352
static int
1353 1354
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
1355
{
1356
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
1357 1358
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
1359 1360 1361
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
1362
	int ret;
1363

1364 1365 1366
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384
	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		if (!intel_runtime_pm_get_if_in_use(i915)) {
			ret = -EFAULT;
			goto out_unlock;
		}
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		intel_runtime_pm_get(i915);
	}

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1386 1387 1388
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
1389 1390 1391
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
1392
		ret = i915_vma_put_fence(vma);
1393 1394 1395 1396 1397
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
1399
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1400
		if (ret)
1401
			goto out_rpm;
1402
		GEM_BUG_ON(!node.allocated);
1403
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

1409 1410
	mutex_unlock(&i915->drm.struct_mutex);

1411
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1412

1413 1414 1415 1416
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
1417 1418
		/* Operation in this page
		 *
1419 1420 1421
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
1422
		 */
1423
		u32 page_base = node.start;
1424 1425
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
1426 1427 1428 1429 1430 1431 1432 1433 1434 1435
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
1436
		/* If we get a fault while copying data, then (presumably) our
1437 1438
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
1439 1440
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
1441
		 */
1442
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
1443 1444 1445
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}
1447

1448 1449 1450
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
1451
	}
1452
	intel_fb_obj_flush(obj, ORIGIN_CPU);
1453 1454

	mutex_lock(&i915->drm.struct_mutex);
out_unpin:
1456 1457 1458
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
1459
				       node.start, node.size);
1460 1461
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
1463
	}
1464
out_rpm:
1465
	intel_runtime_pm_put(i915);
1466
out_unlock:
1467
	mutex_unlock(&i915->drm.struct_mutex);
1468
	return ret;
1469 1470
}

1471
static int
1472
shmem_pwrite_slow(struct page *page, int offset, int length,
1473 1474 1475 1476
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
1477
{
1478 1479
	char *vaddr;
	int ret;
1480

1481
	vaddr = kmap(page);
1482
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1483
		shmem_clflush_swizzled_range(vaddr + offset, length,
1484
					     page_do_bit17_swizzling);
1485
	if (page_do_bit17_swizzling)
1486 1487
		ret = __copy_from_user_swizzled(vaddr, offset, user_data,
						length);
1488
	else
1489
		ret = __copy_from_user(vaddr + offset, user_data, length);
1490
	if (needs_clflush_after)
1491
		shmem_clflush_swizzled_range(vaddr + offset, length,
1492
					     page_do_bit17_swizzling);
1493
	kunmap(page);
1494

1495
	return ret ? -EFAULT : 0;
1496 1497
}

1498 1499 1500 1501 1502
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
1503
static int
1504 1505 1506 1507
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool page_do_bit17_swizzling,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
1508
{
1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush_before)
			drm_clflush_virt_range(vaddr + offset, len);
		ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
		if (needs_clflush_after)
			drm_clflush_virt_range(vaddr + offset, len);

		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return ret;

	return shmem_pwrite_slow(page, offset, len, user_data,
				 page_do_bit17_swizzling,
				 needs_clflush_before,
				 needs_clflush_after);
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	void __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int partial_cacheline_write;
1541
	unsigned int needs_clflush;
1542 1543
	unsigned int offset, idx;
	int ret;
1544

1545
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1546 1547 1548
	if (ret)
		return ret;

1549 1550 1551 1552
	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	mutex_unlock(&i915->drm.struct_mutex);
	if (ret)
		return ret;
1553

1554 1555 1556
	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);
1557

1558 1559 1560 1561 1562 1563 1564
	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1565

1566 1567 1568 1569 1570 1571
	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;
1572

1573 1574 1575
		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;
1576

1577 1578 1579 1580
		ret = shmem_pwrite(page, offset, length, user_data,
				   page_to_phys(page) & obj_do_bit17_swizzling,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
1581
		if (ret)
1582
			break;
1583

1584 1585 1586
		remain -= length;
		user_data += length;
		offset = 0;
1587
	}
1588

1589
	intel_fb_obj_flush(obj, ORIGIN_CPU);
1590
	i915_gem_obj_finish_shmem_access(obj);
1591
	return ret;
1592 1593 1594 1595
}

/**
 * Writes data to the object referenced by handle.
1596 1597 1598
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1599 1600 1601 1602 1603
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1604
		      struct drm_file *file)
1605 1606
{
	struct drm_i915_gem_pwrite *args = data;
1607
	struct drm_i915_gem_object *obj;
1608 1609 1610 1611 1612 1613
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
1614
		       u64_to_user_ptr(args->data_ptr),
1615 1616 1617
		       args->size))
		return -EFAULT;

1618
	obj = i915_gem_object_lookup(file, args->handle);
1619 1620
	if (!obj)
		return -ENOENT;
1621

1622
	/* Bounds check destination. */
1623
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
1625
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

1630 1631 1632 1633 1634 1635
	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

1636 1637 1638 1639 1640
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1641 1642 1643
	if (ret)
		goto err;

1644
	ret = i915_gem_object_pin_pages(obj);
1645
	if (ret)
1646
		goto err;
1647

	ret = -EFAULT;
1649 1650 1651 1652 1653 1654
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
1655
	if (!i915_gem_object_has_struct_page(obj) ||
1656
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
1659 1660
		 * textures). Fallback to the shmem path in that case.
		 */
1661
		ret = i915_gem_gtt_pwrite_fast(obj, args);
1662

1663
	if (ret == -EFAULT || ret == -ENOSPC) {
1664 1665
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
1666
		else
1667
			ret = i915_gem_shmem_pwrite(obj, args);
1668
	}
1669

1670
	i915_gem_object_unpin_pages(obj);
1671
err:
	i915_gem_object_put(obj);
1673
	return ret;
1674 1675
}

1676 1677 1678 1679 1680 1681
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915;
	struct list_head *list;
	struct i915_vma *vma;

1682 1683
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

1684
	for_each_ggtt_vma(vma, obj) {
1685 1686 1687 1688 1689 1690 1691 1692 1693 1694
		if (i915_vma_is_active(vma))
			continue;

		if (!drm_mm_node_allocated(&vma->node))
			continue;

		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}

	i915 = to_i915(obj->base.dev);
1695
	spin_lock(&i915->mm.obj_lock);
1696
	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1697 1698
	list_move_tail(&obj->mm.link, list);
	spin_unlock(&i915->mm.obj_lock);
1699 1700
}

1701
/**
1702 1703
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
1704 1705 1706
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1707 1708 1709
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1710
			  struct drm_file *file)
1711 1712
{
	struct drm_i915_gem_set_domain *args = data;
1713
	struct drm_i915_gem_object *obj;
1714 1715
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
1716
	int err;
1717

1718
	/* Only handle setting domains to types used by the CPU. */
1719
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1720 1721 1722 1723 1724 1725 1726 1727
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

1728
	obj = i915_gem_object_lookup(file, args->handle);
1729 1730
	if (!obj)
		return -ENOENT;
1731

1732 1733 1734 1735
	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
1736
	err = i915_gem_object_wait(obj,
1737 1738 1739 1740
				   I915_WAIT_INTERRUPTIBLE |
				   (write_domain ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1741
	if (err)
		goto out;
1743

	/*
	 * Proxy objects do not control access to the backing storage, ergo
	 * they cannot be used as a means to manipulate the cache domain
	 * tracking for that backing storage. The proxy object is always
	 * considered to be outside of any cache domain.
	 */
	if (i915_gem_object_is_proxy(obj)) {
		err = -ENXIO;
		goto out;
	}

	/*
	 * Flush and acquire obj->pages so that we are coherent through
1757 1758 1759 1760 1761 1762 1763 1764 1765
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;
1767 1768 1769

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out_unpin;
1771

1772 1773 1774 1775
	if (read_domains & I915_GEM_DOMAIN_WC)
		err = i915_gem_object_set_to_wc_domain(obj, write_domain);
	else if (read_domains & I915_GEM_DOMAIN_GTT)
		err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
1776
	else
1777
		err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
1778

1779 1780
	/* And bump the LRU for this access */
	i915_gem_object_bump_inactive_ggtt(obj);
1781

1782
	mutex_unlock(&dev->struct_mutex);
1783

1784
	if (write_domain != 0)
1785 1786
		intel_fb_obj_invalidate(obj,
					fb_write_origin(obj, write_domain));
1787

out_unpin:
1789
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}
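
/*
 * Illustrative userspace sketch (not part of this file): before accessing a
 * buffer through a CPU mmap, callers typically move it to the CPU domain with
 * DRM_IOCTL_I915_GEM_SET_DOMAIN, e.g.
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 *
 * Note the rule enforced above: a non-zero write_domain must equal
 * read_domains. drmIoctl() is assumed from libdrm; error handling omitted.
 */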

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1800 1801 1802
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1803
			 struct drm_file *file)
1804 1805
{
	struct drm_i915_gem_sw_finish *args = data;
1806
	struct drm_i915_gem_object *obj;
1807

1808
	obj = i915_gem_object_lookup(file, args->handle);
1809 1810
	if (!obj)
		return -ENOENT;
1811

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

1817
	/* Pinned buffers may be scanout, so flush the cache */
1818
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);
1820 1821

	return 0;
1822 1823 1824
}

/**
1825 1826 1827 1828 1829
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1830 1831 1832
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
1833 1834 1835 1836 1837 1838 1839 1840 1841 1842
 *
 * IMPORTANT:
 *
 * DRM driver writers who look a this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
1843 1844 1845
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		i915_gem_object_put(obj);
		return -ENXIO;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem)) {
			i915_gem_object_put(obj);
			return -EINTR;
		}
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
	}
	i915_gem_object_put(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

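/* Number of pages spanned by a single tile row of this object; used below to
 * round partial GTT views so that they stay suitable for fenced access.
 */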
static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 2;
}

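/* Build a partial GGTT view covering the faulting chunk of the object,
 * falling back to a normal (full) view when the chunk spans the whole object.
 */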
static inline struct i915_ggtt_view
compute_partial_view(struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj));

	view.type = I915_GGTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 *
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
 */
int i915_gem_fault(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
	struct vm_area_struct *area = vmf->vma;
	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned int flags;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon acquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
		ret = -EFAULT;
		goto err_unlock;
	}

	/* If the object is smaller than a couple of partial vma, it is
	 * not worth only creating a single partial vma - we may as well
	 * clear enough space for the full object.
	 */
	flags = PIN_MAPPABLE;
	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
		flags |= PIN_NONBLOCK | PIN_NONFAULT;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
	if (IS_ERR(vma)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);

		/* Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */
		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unlock;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_unpin;

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->iomap);
	if (ret)
		goto err_fence;

	/* Mark as being mmapped into userspace for later revocation */
	assert_rpm_wakelock_held(dev_priv);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
	GEM_BUG_ON(!obj->userfault_count);

	i915_vma_set_ggtt_write(vma);

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(dev_priv);
	i915_gem_object_unpin_pages(obj);
err:
	switch (ret) {
	case -EIO:
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}
	return ret;
}

static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	obj->userfault_count = 0;
	list_del(&obj->userfault_link);
	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	for_each_ggtt_vma(vma, obj)
		i915_vma_unset_userfault(vma);
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	lockdep_assert_held(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap(obj);

	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

out:
	intel_runtime_pm_put(i915);
}

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.userfault_list, userfault_link)
		__i915_gem_object_release_mmap(obj);

	/* The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		/* Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

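/* Reserve a fake mmap offset for this object in the DRM vma manager, reaping
 * freed and idle objects to make room if the offset space is exhausted.
 */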
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int err;

	err = drm_gem_create_mmap_offset(&obj->base);
	if (likely(!err))
		return 0;

	/* Attempt to reap some mmap space from dead objects */
	do {
		err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
		if (err)
			break;

		i915_gem_drain_freed_objects(dev_priv);
		err = drm_gem_create_mmap_offset(&obj->base);
		if (!err)
			break;

	} while (flush_delayed_work(&dev_priv->gt.retire_work));

	return err;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret == 0)
		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

	i915_gem_object_put(obj);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);
}

/* Try to discard unwanted pages */
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = obj->base.filp->f_mapping,
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
			      struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		if (obj->mm.madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		put_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	rcu_read_unlock();
}

void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				 enum i915_mm_subclass subclass)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return;

	GEM_BUG_ON(obj->bind_count);
	if (!i915_gem_object_has_pages(obj))
		return;

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock_nested(&obj->mm.lock, subclass);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
		goto unlock;

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	pages = fetch_and_zero(&obj->mm.pages);
	GEM_BUG_ON(!pages);

	spin_lock(&i915->mm.obj_lock);
	list_del(&obj->mm.link);
	spin_unlock(&i915->mm.obj_lock);

	if (obj->mm.mapping) {
		void *ptr;

		ptr = page_mask_bits(obj->mm.mapping);
		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

unlock:
	mutex_unlock(&obj->mm.lock);
}

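/* Compact an sg_table whose nents ended up smaller than orig_nents so the
 * unused trailing entries do not waste memory; returns true if a new table
 * was installed.
 */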
static bool i915_sg_trim(struct sg_table *orig_st)
{
	struct sg_table new_st;
	struct scatterlist *sg, *new_sg;
	unsigned int i;

	if (orig_st->nents == orig_st->orig_nents)
		return false;

	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
		return false;

	new_sg = new_st.sgl;
	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
		/* called before being DMA mapped, no need to copy sg->dma_* */
		new_sg = sg_next(new_sg);
	}
	GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */

	sg_free_table(orig_st);

	*orig_st = new_st;
	return true;
}

static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned long i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	unsigned int max_segment = i915_sg_segment_size();
	unsigned int sg_page_sizes;
	gfp_t noreclaim;
	int ret;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

rebuild_st:
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = obj->base.filp->f_mapping;
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (likely(!IS_ERR(page)))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
			cond_resched();

			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/* Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);

		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i) {
				sg_page_sizes |= sg->length;
				sg = sg_next(sg);
			}
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
	if (sg) { /* loop terminated early; short sg table */
		sg_page_sizes |= sg->length;
		sg_mark_end(sg);
	}

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/* DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(&dev_priv->drm.pdev->dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err_sg:
	sg_mark_end(sg);
err_pages:
	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);
	sg_free_table(st);
	kfree(st);

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	spin_lock(&i915->mm.obj_lock);
	list_add(&obj->mm.link, &i915->mm.unbound_list);
	spin_unlock(&i915->mm.obj_lock);
}

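/* Lowest-level page acquisition: reject purged/purgeable objects and ask the
 * backend (obj->ops->get_pages) to populate obj->mm.pages. Callers hold
 * obj->mm.lock.
 */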
static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->mm.pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;
	pgprot_t pgprot;
	void *addr;

	/* A single page can always be kmapped */
	if (n_pages == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;
	}

	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

	switch (type) {
	default:
		MISSING_CASE(type);
		/* fallthrough to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);

	if (pages != stack_pages)
		kvfree(pages);

	return addr;
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int ret;

	if (unlikely(!i915_gem_object_has_struct_page(obj)))
		return ERR_PTR(-ENXIO);

	ret = mutex_lock_interruptible(&obj->mm.lock);
	if (ret)
		return ERR_PTR(ret);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			ret = ____i915_gem_object_get_pages(obj);
			if (ret)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ret = -EBUSY;
			goto err_unpin;
		}

		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			ret = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(ret);
	goto out_unlock;
}

static int
i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
			   const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/* Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation; corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap(page);
		unwritten = copy_from_user(vaddr + pg, user_data, len);
		kunmap(page);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		if (unwritten)
			return -EFAULT;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}

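/* A context found guilty of hanging the GPU accumulates ban score; once the
 * score crosses CONTEXT_SCORE_BAN_THRESHOLD the context is marked banned and
 * the offence is counted against the owning client.
 */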
static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
2943
{
2944
	bool banned;
2945

2946
	atomic_inc(&ctx->guilty_count);
2947

2948 2949 2950 2951 2952 2953 2954 2955 2956 2957 2958
	banned = false;
	if (i915_gem_context_is_bannable(ctx)) {
		unsigned int score;

		score = atomic_add_return(CONTEXT_SCORE_GUILTY,
					  &ctx->ban_score);
		banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;

		DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
				 ctx->name, score, yesno(banned));
	}
2959
	if (!banned)
2960 2961
		return;

2962 2963 2964 2965 2966 2967
	i915_gem_context_set_banned(ctx);
	if (!IS_ERR_OR_NULL(ctx->file_priv)) {
		atomic_inc(&ctx->file_priv->context_bans);
		DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
				 ctx->name, atomic_read(&ctx->file_priv->context_bans));
	}
2968 2969 2970 2971
}

static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
{
2972
	atomic_inc(&ctx->active_count);
2973 2974
}

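/* Walk the engine timeline and return the oldest request that has been
 * submitted but not yet completed, i.e. the request the hardware was most
 * likely executing at the time of the call.
 */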
struct i915_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
2977
{
2978
	struct i915_request *request, *active = NULL;
2979
	unsigned long flags;
2980

2981 2982 2983 2984 2985 2986
	/*
	 * We are called by the error capture, reset and to dump engine
	 * state at random points in time. In particular, note that neither is
	 * crucially ordered with an interrupt. After a hang, the GPU is dead
	 * and we assume that no more writes can happen (we waited long enough
	 * for all writes that were in transaction to be flushed) - adding an
2987 2988
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
2989 2990
	 * At all other times, we must assume the GPU is still running, but
	 * we only care about the snapshot of this moment.
2991
	 */
2992 2993
	spin_lock_irqsave(&engine->timeline.lock, flags);
	list_for_each_entry(request, &engine->timeline.requests, link) {
2994
		if (__i915_request_completed(request, request->global_seqno))
2995
			continue;
2996

2997 2998
		active = request;
		break;
2999
	}
3000
	spin_unlock_irqrestore(&engine->timeline.lock, flags);
3001

3002
	return active;
3003 3004
}

3005 3006 3007 3008
/*
 * Ensure irq handler finishes, and not run again.
 * Also return the active request so that we only search for it once.
 */
3009
struct i915_request *
3010 3011
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
3012
	struct i915_request *request;
3013

3014 3015 3016 3017 3018 3019 3020 3021 3022
	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);

3023
	request = engine->reset.prepare(engine);
3024 3025
	if (request && request->fence.error == -EIO)
		request = ERR_PTR(-EIO); /* Previous reset failed! */
3026 3027 3028 3029

	return request;
}

3030
int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
3031 3032
{
	struct intel_engine_cs *engine;
3033
	struct i915_request *request;
3034
	enum intel_engine_id id;
3035
	int err = 0;
3036

3037
	for_each_engine(engine, dev_priv, id) {
3038 3039 3040 3041
		request = i915_gem_reset_prepare_engine(engine);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			continue;
3042
		}
3043 3044

		engine->hangcheck.active_request = request;
3045 3046
	}

3047
	i915_gem_revoke_fences(dev_priv);
3048
	intel_uc_sanitize(dev_priv);
3049 3050

	return err;
3051 3052
}

static void skip_request(struct i915_request *request)
{
	void *vaddr = request->ring->vaddr;
	u32 head;

	/* As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = request->head;
	if (request->postfix < head) {
		memset(vaddr + head, 0, request->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, request->postfix - head);

	dma_fence_set_error(&request->fence, -EIO);
}

3072
static void engine_skip_context(struct i915_request *request)
3073 3074
{
	struct intel_engine_cs *engine = request->engine;
C
Chris Wilson 已提交
3075
	struct i915_gem_context *hung_ctx = request->gem_context;
3076
	struct i915_timeline *timeline = request->timeline;
3077 3078
	unsigned long flags;

3079
	GEM_BUG_ON(timeline == &engine->timeline);
3080

3081
	spin_lock_irqsave(&engine->timeline.lock, flags);
3082
	spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);
3083

3084
	list_for_each_entry_continue(request, &engine->timeline.requests, link)
C
Chris Wilson 已提交
3085
		if (request->gem_context == hung_ctx)
3086 3087 3088 3089 3090 3091
			skip_request(request);

	list_for_each_entry(request, &timeline->requests, link)
		skip_request(request);

	spin_unlock(&timeline->lock);
3092
	spin_unlock_irqrestore(&engine->timeline.lock, flags);
3093 3094
}

3095
/* Returns the request if it was guilty of the hang */
3096
static struct i915_request *
3097
i915_gem_reset_request(struct intel_engine_cs *engine,
3098 3099
		       struct i915_request *request,
		       bool stalled)
3100
{
3101 3102 3103 3104 3105 3106 3107 3108 3109 3110 3111 3112 3113 3114 3115 3116 3117 3118 3119 3120 3121
	/* The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away random
	 * amount of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when it loses the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanism are safety
	 * valves if client submission ends up resulting in nothing more than
	 * subsequent hangs.
	 */

3122 3123 3124 3125 3126 3127 3128 3129 3130
	if (i915_request_completed(request)) {
		GEM_TRACE("%s pardoned global=%d (fence %llx:%d), current %d\n",
			  engine->name, request->global_seqno,
			  request->fence.context, request->fence.seqno,
			  intel_engine_get_seqno(engine));
		stalled = false;
	}

	if (stalled) {
C
Chris Wilson 已提交
3131
		i915_gem_context_mark_guilty(request->gem_context);
3132
		skip_request(request);
3133 3134

		/* If this context is now banned, skip all pending requests. */
C
Chris Wilson 已提交
3135
		if (i915_gem_context_is_banned(request->gem_context))
3136
			engine_skip_context(request);
3137
	} else {
3138 3139 3140 3141 3142 3143 3144
		/*
		 * Since this is not the hung engine, it may have advanced
		 * since the hang declaration. Double check by refinding
		 * the active request at the time of the reset.
		 */
		request = i915_gem_find_active_request(engine);
		if (request) {
C
Chris Wilson 已提交
3145
			i915_gem_context_mark_innocent(request->gem_context);
3146 3147 3148
			dma_fence_set_error(&request->fence, -EAGAIN);

			/* Rewind the engine to replay the incomplete rq */
3149
			spin_lock_irq(&engine->timeline.lock);
3150
			request = list_prev_entry(request, link);
3151
			if (&request->link == &engine->timeline.requests)
3152
				request = NULL;
3153
			spin_unlock_irq(&engine->timeline.lock);
3154
		}
3155 3156
	}

3157
	return request;
3158 3159
}

3160
void i915_gem_reset_engine(struct intel_engine_cs *engine,
3161 3162
			   struct i915_request *request,
			   bool stalled)
3163
{
3164 3165 3166 3167 3168 3169
	/*
	 * Make sure this write is visible before we re-enable the interrupt
	 * handlers on another CPU, as tasklet_enable() resolves to just
	 * a compiler barrier which is insufficient for our purpose here.
	 */
	smp_store_mb(engine->irq_posted, 0);
3170

3171
	if (request)
3172
		request = i915_gem_reset_request(engine, request, stalled);
3173

3174
	/* Setup the CS to resume from the breadcrumb of the hung request */
3175
	engine->reset.reset(engine, request);
3176
}
3177

3178 3179
void i915_gem_reset(struct drm_i915_private *dev_priv,
		    unsigned int stalled_mask)
3180
{
3181
	struct intel_engine_cs *engine;
3182
	enum intel_engine_id id;
3183

3184 3185
	lockdep_assert_held(&dev_priv->drm.struct_mutex);

3186
	i915_retire_requests(dev_priv);
3187

3188
	for_each_engine(engine, dev_priv, id) {
3189
		struct intel_context *ce;
3190

3191 3192
		i915_gem_reset_engine(engine,
				      engine->hangcheck.active_request,
3193
				      stalled_mask & ENGINE_MASK(id));
3194 3195 3196
		ce = fetch_and_zero(&engine->last_retired_context);
		if (ce)
			intel_context_unpin(ce);
3197 3198 3199 3200 3201 3202 3203 3204 3205 3206 3207

		/*
		 * Ostensibily, we always want a context loaded for powersaving,
		 * so if the engine is idle after the reset, send a request
		 * to load our scratch kernel_context.
		 *
		 * More mysteriously, if we leave the engine idle after a reset,
		 * the next userspace batch may hang, with what appears to be
		 * an incoherent read by the CS (presumably stale TLB). An
		 * empty request appears sufficient to paper over the glitch.
		 */
3208
		if (intel_engine_is_idle(engine)) {
3209
			struct i915_request *rq;
3210

3211 3212
			rq = i915_request_alloc(engine,
						dev_priv->kernel_context);
3213
			if (!IS_ERR(rq))
3214
				__i915_request_add(rq, false);
3215
		}
3216
	}
3217

3218
	i915_gem_restore_fences(dev_priv);
3219 3220
}

void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
	engine->reset.finish(engine);

	intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
}

void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id) {
		engine->hangcheck.active_request = NULL;
		i915_gem_reset_finish_engine(engine);
	}
}

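/* Submission stubs installed when the GPU is wedged: requests are marked
 * with -EIO and flushed through the engine timeline without touching the
 * hardware, so that waiters can drain.
 */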
static void nop_submit_request(struct i915_request *request)
{
	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
		  request->engine->name,
		  request->fence.context, request->fence.seqno);
	dma_fence_set_error(&request->fence, -EIO);

	i915_request_submit(request);
}

3251
static void nop_complete_submit_request(struct i915_request *request)
3252
{
3253 3254
	unsigned long flags;

3255 3256 3257
	GEM_TRACE("%s fence %llx:%d -> -EIO\n",
		  request->engine->name,
		  request->fence.context, request->fence.seqno);
3258
	dma_fence_set_error(&request->fence, -EIO);
3259

3260
	spin_lock_irqsave(&request->engine->timeline.lock, flags);
3261
	__i915_request_submit(request);
3262
	intel_engine_init_global_seqno(request->engine, request->global_seqno);
3263
	spin_unlock_irqrestore(&request->engine->timeline.lock, flags);
3264 3265
}

3266
void i915_gem_set_wedged(struct drm_i915_private *i915)
3267
{
3268 3269 3270
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

3271 3272
	GEM_TRACE("start\n");

3273
	if (GEM_SHOW_DEBUG()) {
3274 3275 3276 3277 3278 3279
		struct drm_printer p = drm_debug_printer(__func__);

		for_each_engine(engine, i915, id)
			intel_engine_dump(engine, &p, "%s\n", engine->name);
	}

3280 3281 3282
	set_bit(I915_WEDGED, &i915->gpu_error.flags);
	smp_mb__after_atomic();

3283 3284 3285 3286 3287
	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
3288 3289
	for_each_engine(engine, i915, id) {
		i915_gem_reset_prepare_engine(engine);
3290

3291
		engine->submit_request = nop_submit_request;
3292
		engine->schedule = NULL;
3293
	}
3294
	i915->caps.scheduler = 0;
3295

3296 3297 3298
	/* Even if the GPU reset fails, it should still stop the engines */
	intel_gpu_reset(i915, ALL_ENGINES);

3299 3300 3301 3302
	/*
	 * Make sure no one is running the old callback before we proceed with
	 * cancelling requests and resetting the completion tracking. Otherwise
	 * we might submit a request to the hardware which never completes.
3303
	 */
3304
	synchronize_rcu();
3305

3306 3307 3308
	for_each_engine(engine, i915, id) {
		/* Mark all executing requests as skipped */
		engine->cancel_requests(engine);
3309

3310 3311 3312 3313 3314 3315 3316 3317 3318 3319 3320
		/*
		 * Only once we've force-cancelled all in-flight requests can we
		 * start to complete all requests.
		 */
		engine->submit_request = nop_complete_submit_request;
	}

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_init_global_seqno, or the one
	 * in nop_complete_submit_request.
3321
	 */
3322
	synchronize_rcu();
3323

3324 3325
	for_each_engine(engine, i915, id) {
		unsigned long flags;
3326

3327 3328
		/*
		 * Mark all pending requests as complete so that any concurrent
3329 3330 3331
		 * (lockless) lookup doesn't try and wait upon the request as we
		 * reset it.
		 */
3332
		spin_lock_irqsave(&engine->timeline.lock, flags);
3333 3334
		intel_engine_init_global_seqno(engine,
					       intel_engine_last_submit(engine));
3335
		spin_unlock_irqrestore(&engine->timeline.lock, flags);
3336 3337

		i915_gem_reset_finish_engine(engine);
3338
	}
3339

3340 3341
	GEM_TRACE("end\n");

3342
	wake_up_all(&i915->gpu_error.reset_queue);
3343 3344
}

3345 3346
bool i915_gem_unset_wedged(struct drm_i915_private *i915)
{
3347
	struct i915_timeline *tl;
3348 3349 3350 3351 3352

	lockdep_assert_held(&i915->drm.struct_mutex);
	if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
		return true;

3353 3354
	GEM_TRACE("start\n");

3355 3356
	/*
	 * Before unwedging, make sure that all pending operations
3357 3358 3359 3360 3361 3362 3363 3364 3365
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO, for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	list_for_each_entry(tl, &i915->gt.timelines, link) {
3366
		struct i915_request *rq;
3367

3368 3369 3370 3371
		rq = i915_gem_active_peek(&tl->last_request,
					  &i915->drm.struct_mutex);
		if (!rq)
			continue;
3372

3373 3374 3375 3376 3377 3378 3379 3380 3381 3382 3383 3384 3385 3386
		/*
		 * We can't use our normal waiter as we want to
		 * avoid recursively trying to handle the current
		 * reset. The basic dma_fence_default_wait() installs
		 * a callback for dma_fence_signal(), which is
		 * triggered by our nop handler (indirectly, the
		 * callback enables the signaler thread which is
		 * woken by the nop_submit_request() advancing the seqno
		 * and when the seqno passes the fence, the signaler
		 * then signals the fence waking us up).
		 */
		if (dma_fence_default_wait(&rq->fence, true,
					   MAX_SCHEDULE_TIMEOUT) < 0)
			return false;
3387
	}
3388 3389
	i915_retire_requests(i915);
	GEM_BUG_ON(i915->gt.active_requests);
3390

3391 3392
	/*
	 * Undo nop_submit_request. We prevent all new i915 requests from
3393 3394 3395 3396 3397 3398 3399 3400
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(i915);
3401
	i915_gem_contexts_lost(i915);
3402

3403 3404
	GEM_TRACE("end\n");

3405 3406 3407 3408 3409 3410
	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &i915->gpu_error.flags);

	return true;
}

3411
static void
3412 3413
i915_gem_retire_work_handler(struct work_struct *work)
{
3414
	struct drm_i915_private *dev_priv =
3415
		container_of(work, typeof(*dev_priv), gt.retire_work.work);
3416
	struct drm_device *dev = &dev_priv->drm;
3417

3418
	/* Come back later if the device is busy... */
3419
	if (mutex_trylock(&dev->struct_mutex)) {
3420
		i915_retire_requests(dev_priv);
3421
		mutex_unlock(&dev->struct_mutex);
3422
	}
3423

3424 3425
	/*
	 * Keep the retire handler running until we are finally idle.
3426 3427 3428
	 * We do not need to do this test under locking as in the worst-case
	 * we queue the retire worker once too often.
	 */
3429
	if (READ_ONCE(dev_priv->gt.awake))
3430 3431
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.retire_work,
3432
				   round_jiffies_up_relative(HZ));
3433
}
3434

static void shrink_caches(struct drm_i915_private *i915)
{
	/*
	 * kmem_cache_shrink() discards empty slabs and reorders partially
	 * filled slabs to prioritise allocating from the mostly full slabs,
	 * with the aim of reducing fragmentation.
	 */
	kmem_cache_shrink(i915->priorities);
	kmem_cache_shrink(i915->dependencies);
	kmem_cache_shrink(i915->requests);
	kmem_cache_shrink(i915->luts);
	kmem_cache_shrink(i915->vmas);
	kmem_cache_shrink(i915->objects);
}

struct sleep_rcu_work {
	union {
		struct rcu_head rcu;
		struct work_struct work;
	};
	struct drm_i915_private *i915;
	unsigned int epoch;
};

static inline bool
same_epoch(struct drm_i915_private *i915, unsigned int epoch)
{
	/*
	 * There is a small chance that the epoch wrapped since we started
	 * sleeping. If we assume that epoch is at least a u32, then it will
	 * take at least 2^32 * 100ms for it to wrap, or about 326 years.
	 */
	return epoch == READ_ONCE(i915->gt.epoch);
}

static void __sleep_work(struct work_struct *work)
{
	struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
	struct drm_i915_private *i915 = s->i915;
	unsigned int epoch = s->epoch;

	kfree(s);
	if (same_epoch(i915, epoch))
		shrink_caches(i915);
}

static void __sleep_rcu(struct rcu_head *rcu)
{
	struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
	struct drm_i915_private *i915 = s->i915;

	if (same_epoch(i915, s->epoch)) {
		INIT_WORK(&s->work, __sleep_work);
		queue_work(i915->wq, &s->work);
	} else {
		kfree(s);
	}
}

static inline bool
new_requests_since_last_retire(const struct drm_i915_private *i915)
{
	return (READ_ONCE(i915->gt.active_requests) ||
		work_pending(&i915->gt.idle_work.work));
}

static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
3505
		container_of(work, typeof(*dev_priv), gt.idle_work.work);
3506
	unsigned int epoch = I915_EPOCH_INVALID;
3507 3508 3509 3510 3511
	bool rearm_hangcheck;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

3512 3513 3514 3515 3516 3517 3518 3519 3520 3521 3522 3523 3524 3525 3526 3527 3528 3529
	if (READ_ONCE(dev_priv->gt.active_requests))
		return;

	/*
	 * Flush out the last user context, leaving only the pinned
	 * kernel context resident. When we are idling on the kernel_context,
	 * no more new requests (with a context switch) are emitted and we
	 * can finally rest. A consequence is that the idle work handler is
	 * always called at least twice before idling (and if the system is
	 * idle that implies a round trip through the retire worker).
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_switch_to_kernel_context(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
		  READ_ONCE(dev_priv->gt.active_requests));

3530 3531
	/*
	 * Wait for last execlists context complete, but bail out in case a
3532 3533 3534 3535 3536
	 * new request is submitted. As we don't trust the hardware, we
	 * continue on if the wait times out. This is necessary to allow
	 * the machine to suspend even if the hardware dies, and we will
	 * try to recover in resume (after depriving the hardware of power,
	 * it may be in a better mmod).
3537
	 */
3538 3539 3540 3541
	__wait_for(if (new_requests_since_last_retire(dev_priv)) return,
		   intel_engines_are_idle(dev_priv),
		   I915_IDLE_ENGINES_TIMEOUT * 1000,
		   10, 500);
3542 3543 3544 3545

	rearm_hangcheck =
		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

3546
	if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
3547 3548 3549 3550 3551 3552 3553
		/* Currently busy, come back later */
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->gt.idle_work,
				 msecs_to_jiffies(50));
		goto out_rearm;
	}

3554 3555 3556 3557
	/*
	 * New request retired after this work handler started, extend active
	 * period until next instance of the work.
	 */
3558
	if (new_requests_since_last_retire(dev_priv))
3559
		goto out_unlock;
3560

3561
	epoch = __i915_gem_park(dev_priv);
3562

3563 3564
	rearm_hangcheck = false;
out_unlock:
3565
	mutex_unlock(&dev_priv->drm.struct_mutex);
3566

3567 3568 3569 3570
out_rearm:
	if (rearm_hangcheck) {
		GEM_BUG_ON(!dev_priv->gt.awake);
		i915_queue_hangcheck(dev_priv);
3571
	}
3572 3573 3574 3575 3576 3577 3578 3579 3580 3581 3582 3583 3584 3585 3586 3587 3588

	/*
	 * When we are idle, it is an opportune time to reap our caches.
	 * However, we have many objects that utilise RCU and the ordered
	 * i915->wq that this work is executing on. To try and flush any
	 * pending frees now we are idle, we first wait for an RCU grace
	 * period, and then queue a task (that will run last on the wq) to
	 * shrink and re-optimize the caches.
	 */
	if (same_epoch(dev_priv, epoch)) {
		struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
		if (s) {
			s->i915 = dev_priv;
			s->epoch = epoch;
			call_rcu(&s->rcu, __sleep_rcu);
		}
	}
3589 3590
}

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(gem->dev);
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_lut_handle *lut, *ln;

	mutex_lock(&i915->drm.struct_mutex);

	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

		GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
		if (ctx->file_priv != fpriv)
			continue;

		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		GEM_BUG_ON(vma->obj != obj);

		/* We allow the process to have multiple handles to the same
		 * vma, in the same fd namespace, by virtue of flink/open.
		 */
		GEM_BUG_ON(!vma->open_count);
		if (!--vma->open_count && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&lut->obj_link);
		list_del(&lut->ctx_link);

		kmem_cache_free(i915->luts, lut);
		__i915_gem_object_release_unless_active(obj);
	}

	mutex_unlock(&i915->drm.struct_mutex);
}

static unsigned long to_wait_timeout(s64 timeout_ns)
{
	if (timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;

	if (timeout_ns == 0)
		return 0;

	return nsecs_to_jiffies_timeout(timeout_ns);
}
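
/*
 * Worked example (illustrative only, not upstream documentation): a negative
 * timeout_ns yields MAX_SCHEDULE_TIMEOUT (wait forever), timeout_ns == 0
 * degenerates into a non-blocking busy query, and a positive value such as
 * 10,000,000ns (10ms) is converted to jiffies, rounded up by
 * nsecs_to_jiffies_timeout() so the wait is never shorter than requested.
 */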

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: incomplete, restart syscall
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	ktime_t start;
	long ret;

	if (args->flags != 0)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->bo_handle);
	if (!obj)
		return -ENOENT;

	start = ktime_get();

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
				   to_wait_timeout(args->timeout_ns),
				   to_rps_client(file));

	if (args->timeout_ns > 0) {
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
			args->timeout_ns = 0;

		/* Asked to wait beyond the jiffy/scheduler precision? */
		if (ret == -ETIME && args->timeout_ns)
			ret = -EAGAIN;
	}

	i915_gem_object_put(obj);
	return ret;
}
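
/*
 * Illustrative sketch (not part of this file): userspace typically drives
 * this ioctl through libdrm, e.g. waiting up to 10ms for a buffer to become
 * idle. The fd and handle below are assumptions for the example.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 10 * 1000 * 1000,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) == 0)
 *		the object is idle and wait.timeout_ns holds the time left.
 */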

static int wait_for_timeline(struct i915_timeline *tl, unsigned int flags)
{
	struct i915_request *rq;
	long ret;

	rq = i915_gem_active_get_unlocked(&tl->last_request);
	if (!rq)
		return 0;

	/*
	 * "Race-to-idle".
	 *
	 * Switching to the kernel context is often used as a synchronous
	 * step prior to idling, e.g. in suspend for flushing all
	 * current operations to memory before sleeping. These we
	 * want to complete as quickly as possible to avoid prolonged
	 * stalls, so allow the gpu to boost to maximum clocks.
	 */
	if (flags & I915_WAIT_FOR_IDLE_BOOST)
		gen6_rps_boost(rq, NULL);

	ret = i915_request_wait(rq, flags, MAX_SCHEDULE_TIMEOUT);
	i915_request_put(rq);

	return ret < 0 ? ret : 0;
}

static int wait_for_engines(struct drm_i915_private *i915)
{
	if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
		dev_err(i915->drm.dev,
			"Failed to idle engines, declaring wedged!\n");
		GEM_TRACE_DUMP();
		i915_gem_set_wedged(i915);
		return -EIO;
	}

	return 0;
}

int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
	GEM_TRACE("flags=%x (%s)\n",
		  flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked");

	/* If the device is asleep, we have no requests outstanding */
	if (!READ_ONCE(i915->gt.awake))
		return 0;

	if (flags & I915_WAIT_LOCKED) {
		struct i915_timeline *tl;
		int err;

		lockdep_assert_held(&i915->drm.struct_mutex);

		list_for_each_entry(tl, &i915->gt.timelines, link) {
			err = wait_for_timeline(tl, flags);
			if (err)
				return err;
		}
		i915_retire_requests(i915);
		GEM_BUG_ON(i915->gt.active_requests);

		return wait_for_engines(i915);
	} else {
		struct intel_engine_cs *engine;
		enum intel_engine_id id;
		int err;

		for_each_engine(engine, i915, id) {
			err = wait_for_timeline(&engine->timeline, flags);
			if (err)
				return err;
		}

		return 0;
	}
}
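
/*
 * Illustrative call site (an assumption, not taken from this file): flushing
 * all outstanding work while holding struct_mutex before a global operation
 * such as suspend or eviction would look like:
 *
 *	err = i915_gem_wait_for_idle(i915,
 *				     I915_WAIT_INTERRUPTIBLE |
 *				     I915_WAIT_LOCKED);
 */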

static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
	/*
	 * We manually flush the CPU domain so that we can override and
	 * force the flush for the display, and perform it asynchronously.
	 */
	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
	if (obj->cache_dirty)
		i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
	obj->write_domain = 0;
}

void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{
	if (!READ_ONCE(obj->pin_global))
		return;

	mutex_lock(&obj->base.dev->struct_mutex);
	__i915_gem_object_flush_for_display(obj);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

/**
 * Moves a single object to the WC read, and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	if (obj->write_domain == I915_GEM_DOMAIN_WC)
		return 0;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * WC domain upon first access.
	 */
	if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0)
		mb();

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_WC;
	if (write) {
		obj->read_domains = I915_GEM_DOMAIN_WC;
		obj->write_domain = I915_GEM_DOMAIN_WC;
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);
	return 0;
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	if (obj->write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->read_domains = I915_GEM_DOMAIN_GTT;
		obj->write_domain = I915_GEM_DOMAIN_GTT;
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);
	return 0;
}

/**
 * Changes the cache-level of an object across all VMA.
 * @obj: object to act on
 * @cache_level: new cache level to set for the object
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (obj->cache_level == cache_level)
		return 0;

	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
restart:
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_pinned(vma)) {
			DRM_DEBUG("can not change the cache level of pinned objects\n");
			return -EBUSY;
		}

		if (!i915_vma_is_closed(vma) &&
		    i915_gem_valid_gtt_space(vma, cache_level))
			continue;

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;

		/* As unbinding may affect other elements in the
		 * obj->vma_list (due to side-effects from retiring
		 * an active vma), play safe and restart the iterator.
		 */
		goto restart;
	}

	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However since
	 * we already have a valid slot, dma mapping, pages etc, we may as
	 * well rewrite the PTE in the belief that doing so tramples upon less
	 * state and so involves less work.
	 */
	if (obj->bind_count) {
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
		 */
		ret = i915_gem_object_wait(obj,
					   I915_WAIT_INTERRUPTIBLE |
					   I915_WAIT_LOCKED |
					   I915_WAIT_ALL,
					   MAX_SCHEDULE_TIMEOUT,
					   NULL);
		if (ret)
			return ret;

		if (!HAS_LLC(to_i915(obj->base.dev)) &&
		    cache_level != I915_CACHE_NONE) {
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmapping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
			i915_gem_release_mmap(obj);

			/* As we no longer need a fence for GTT access,
			 * we can relinquish it now (and so prevent having
			 * to steal a fence from someone else on the next
			 * fence request). Note GPU activity would have
			 * dropped the fence as all snoopable access is
			 * supposed to be linear.
			 */
			for_each_ggtt_vma(vma, obj) {
				ret = i915_vma_put_fence(vma);
				if (ret)
					return ret;
			}
		} else {
			/* We either have incoherent backing store and
			 * so no GTT access or the architecture is fully
			 * coherent. In such cases, existing GTT mmaps
			 * ignore the cache bit in the PTE and we can
			 * rewrite it without confusing the GPU or having
			 * to force userspace to fault back in its mmaps.
			 */
		}

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
			if (ret)
				return ret;
		}
	}

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		vma->node.color = cache_level;
	i915_gem_object_set_cache_coherency(obj, cache_level);
	obj->cache_dirty = true; /* Always invalidate stale cachelines */

	return 0;
}

int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	int err = 0;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj) {
		err = -ENOENT;
		goto out;
	}

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}
out:
	rcu_read_unlock();
	return err;
}

int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret = 0;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
		if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
			return -ENODEV;

		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * The caching mode of proxy object is handled by its generator, and
	 * not allowed to be changed by userspace.
	 */
	if (i915_gem_object_is_proxy(obj)) {
		ret = -ENXIO;
		goto out;
	}

	if (obj->cache_level == level)
		goto out;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		goto out;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	ret = i915_gem_object_set_cache_level(obj, level);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_put(obj);
	return ret;
}
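
/*
 * Illustrative sketch (not part of this file): from userspace the cache
 * level is selected with DRM_IOCTL_I915_GEM_SET_CACHING; the fd and handle
 * below are assumptions for the example.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */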

/*
 * Prepare buffer for display plane (scanout, cursors, etc). Can be called from
 * an uninterruptible phase (modesetting) and allows any flushes to be pipelined
 * (for pageflips). We only flush the caches while preparing the buffer for
 * display, the callers are responsible for frontbuffer flush.
 */
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view,
				     unsigned int flags)
{
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Mark the global pin early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	obj->pin_global++;

	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(to_i915(obj->base.dev)) ?
					      I915_CACHE_WT : I915_CACHE_NONE);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err_unpin_global;
	}

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers. However,
	 * it may simply be too big to fit into mappable, in which case
	 * put it anyway and hope that userspace can cope (but always first
	 * try to preserve the existing ABI).
	 */
	vma = ERR_PTR(-ENOSPC);
	if ((flags & PIN_MAPPABLE) == 0 &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL))
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
					       flags |
					       PIN_MAPPABLE |
					       PIN_NONBLOCK);
	if (IS_ERR(vma))
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
	if (IS_ERR(vma))
		goto err_unpin_global;

	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

	__i915_gem_object_flush_for_display(obj);

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->read_domains |= I915_GEM_DOMAIN_GTT;

	return vma;

err_unpin_global:
	obj->pin_global--;
	return vma;
}

void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (WARN_ON(vma->obj->pin_global == 0))
		return;

	if (--vma->obj->pin_global == 0)
		vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	/* Bump the LRU to try and avoid premature eviction whilst flipping */
	i915_gem_object_bump_inactive_ggtt(vma->obj);

	i915_vma_unpin(vma);
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 * @obj: object to act on
 * @write: requesting write or read-only access
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		obj->read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write)
		__start_cpu_write(obj);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct i915_request *request, *target = NULL;
	long ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		if (target) {
			list_del(&target->client_link);
			target->file_priv = NULL;
		}

		target = request;
	}
	if (target)
		i915_request_get(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = i915_request_wait(target,
				I915_WAIT_INTERRUPTIBLE,
				MAX_SCHEDULE_TIMEOUT);
	i915_request_put(target);

	return ret < 0 ? ret : 0;
}

struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
		/* If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > dev_priv->ggtt.mappable_end)
			return ERR_PTR(-E2BIG);

		/* If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to do a search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

	vma = i915_vma_instance(obj, vm, view);
	if (unlikely(IS_ERR(vma)))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			if (flags & PIN_MAPPABLE &&
			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}

static __always_inline unsigned int __busy_read_flag(unsigned int id)
{
	/* Note that we could alias engines in the execbuf API, but
	 * that would be very unwise as it prevents userspace from
	 * fine control over engine selection. Ahem.
	 *
	 * This should be something like EXEC_MAX_ENGINE instead of
	 * I915_NUM_ENGINES.
	 */
	BUILD_BUG_ON(I915_NUM_ENGINES > 16);
	return 0x10000 << id;
}

static __always_inline unsigned int __busy_write_id(unsigned int id)
{
	/* The uABI guarantees an active writer is also amongst the read
	 * engines. This would be true if we accessed the activity tracking
	 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we can not guarantee that the last_write
	 * being active implies that we have set the same engine flag from
	 * last_read - hence we always set both read and write busy for
	 * last_write.
	 */
	return id | __busy_read_flag(id);
}
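
/*
 * Worked example (illustrative): for an engine with uabi_id 1,
 * __busy_read_flag(1) == 0x20000 and __busy_write_id(1) == 0x20001, i.e.
 * the low 16 bits of the busy-ioctl result carry the uabi_id of the last
 * writer while each active reader sets one bit in the upper 16 bits.
 */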

4456
static __always_inline unsigned int
4457
__busy_set_if_active(const struct dma_fence *fence,
4458 4459
		     unsigned int (*flag)(unsigned int id))
{
4460
	struct i915_request *rq;
4461

4462 4463 4464 4465
	/* We have to check the current hw status of the fence as the uABI
	 * guarantees forward progress. We could rely on the idle worker
	 * to eventually flush us, but to minimise latency just ask the
	 * hardware.
4466
	 *
4467
	 * Note we only report on the status of native fences.
4468
	 */
4469 4470 4471 4472
	if (!dma_fence_is_i915(fence))
		return 0;

	/* opencode to_request() in order to avoid const warnings */
4473 4474
	rq = container_of(fence, struct i915_request, fence);
	if (i915_request_completed(rq))
4475 4476
		return 0;

4477
	return flag(rq->engine->uabi_id);
4478 4479
}

4480
static __always_inline unsigned int
4481
busy_check_reader(const struct dma_fence *fence)
4482
{
4483
	return __busy_set_if_active(fence, __busy_read_flag);
4484 4485
}

4486
static __always_inline unsigned int
4487
busy_check_writer(const struct dma_fence *fence)
4488
{
4489 4490 4491 4492
	if (!fence)
		return 0;

	return __busy_set_if_active(fence, __busy_write_id);
4493 4494
}

4495 4496
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4497
		    struct drm_file *file)
4498 4499
{
	struct drm_i915_gem_busy *args = data;
4500
	struct drm_i915_gem_object *obj;
4501 4502
	struct reservation_object_list *list;
	unsigned int seq;
4503
	int err;
4504

4505
	err = -ENOENT;
4506 4507
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
4508
	if (!obj)
4509
		goto out;
4510

4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521 4522 4523 4524 4525 4526 4527 4528
	/* A discrepancy here is that we do not report the status of
	 * non-i915 fences, i.e. even though we may report the object as idle,
	 * a call to set-domain may still stall waiting for foreign rendering.
	 * This also means that wait-ioctl may report an object as busy,
	 * where busy-ioctl considers it idle.
	 *
	 * We trade the ability to warn of foreign fences to report on which
	 * i915 engines are active for the object.
	 *
	 * Alternatively, we can trade that extra information on read/write
	 * activity with
	 *	args->busy =
	 *		!reservation_object_test_signaled_rcu(obj->resv, true);
	 * to report the overall busyness. This is what the wait-ioctl does.
	 *
	 */
retry:
	seq = raw_read_seqcount(&obj->resv->seq);
4529

4530 4531
	/* Translate the exclusive fence to the READ *and* WRITE engine */
	args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
4532

4533 4534 4535 4536
	/* Translate shared fences to READ set of engines */
	list = rcu_dereference(obj->resv->fence);
	if (list) {
		unsigned int shared_count = list->shared_count, i;
4537

4538 4539 4540 4541 4542 4543
		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence =
				rcu_dereference(list->shared[i]);

			args->busy |= busy_check_reader(fence);
		}
4544
	}
4545

4546 4547 4548 4549
	if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
		goto retry;

	err = 0;
4550 4551 4552
out:
	rcu_read_unlock();
	return err;
4553 4554 4555 4556 4557 4558
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
4559
	return i915_gem_ring_throttle(dev, file_priv);
4560 4561
}

4562 4563 4564 4565
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
4566
	struct drm_i915_private *dev_priv = to_i915(dev);
4567
	struct drm_i915_gem_madvise *args = data;
4568
	struct drm_i915_gem_object *obj;
4569
	int err;
4570 4571 4572 4573 4574 4575 4576 4577 4578

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

4579
	obj = i915_gem_object_lookup(file_priv, args->handle);
4580 4581 4582 4583 4584 4585
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;
4586

4587
	if (i915_gem_object_has_pages(obj) &&
4588
	    i915_gem_object_is_tiled(obj) &&
4589
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4590 4591
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
C
Chris Wilson 已提交
4592
			__i915_gem_object_unpin_pages(obj);
4593 4594 4595
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
4596
			GEM_BUG_ON(obj->mm.quirked);
C
Chris Wilson 已提交
4597
			__i915_gem_object_pin_pages(obj);
4598 4599
			obj->mm.quirked = true;
		}
4600 4601
	}

C
Chris Wilson 已提交
4602 4603
	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;
4604

C
Chris Wilson 已提交
4605
	/* if the object is no longer attached, discard its backing storage */
4606 4607
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
4608 4609
		i915_gem_object_truncate(obj);

C
Chris Wilson 已提交
4610
	args->retained = obj->mm.madv != __I915_MADV_PURGED;
4611
	mutex_unlock(&obj->mm.lock);
C
Chris Wilson 已提交
4612

4613
out:
4614
	i915_gem_object_put(obj);
4615
	return err;
4616 4617
}
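
/*
 * Illustrative sketch (not part of this file): userspace marks a buffer as
 * purgeable with DRM_IOCTL_I915_GEM_MADVISE; the fd and handle below are
 * assumptions for the example.
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *
 * If .retained comes back as 0, the backing storage was already discarded
 * and the old contents are lost.
 */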

4618
static void
4619
frontbuffer_retire(struct i915_gem_active *active, struct i915_request *request)
4620 4621 4622 4623
{
	struct drm_i915_gem_object *obj =
		container_of(active, typeof(*obj), frontbuffer_write);

4624
	intel_fb_obj_flush(obj, ORIGIN_CS);
4625 4626
}

4627 4628
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
4629
{
4630 4631
	mutex_init(&obj->mm.lock);

B
Ben Widawsky 已提交
4632
	INIT_LIST_HEAD(&obj->vma_list);
4633
	INIT_LIST_HEAD(&obj->lut_list);
4634
	INIT_LIST_HEAD(&obj->batch_pool_link);
4635

4636 4637
	obj->ops = ops;

4638 4639 4640
	reservation_object_init(&obj->__builtin_resv);
	obj->resv = &obj->__builtin_resv;

4641
	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
4642
	init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
C
Chris Wilson 已提交
4643 4644 4645 4646

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);
4647

4648
	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4649 4650
}

4651
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4652 4653
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
4654

4655 4656
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
4657 4658

	.pwrite = i915_gem_object_pwrite_gtt,
4659 4660
};

static int i915_gem_object_create_shmem(struct drm_device *dev,
					struct drm_gem_object *obj,
					size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);

	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}

4685
struct drm_i915_gem_object *
4686
i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4687
{
4688
	struct drm_i915_gem_object *obj;
4689
	struct address_space *mapping;
4690
	unsigned int cache_level;
D
Daniel Vetter 已提交
4691
	gfp_t mask;
4692
	int ret;
4693

4694 4695 4696 4697 4698
	/* There is a prevalence of the assumption that we fit the object's
	 * page count inside a 32bit _signed_ variable. Let's document this and
	 * catch if we ever need to fix it. In the meantime, if you do spot
	 * such a local variable, please consider fixing!
	 */
4699
	if (size >> PAGE_SHIFT > INT_MAX)
4700 4701 4702 4703 4704
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

4705
	obj = i915_gem_object_alloc(dev_priv);
4706
	if (obj == NULL)
4707
		return ERR_PTR(-ENOMEM);
4708

M
Matthew Auld 已提交
4709
	ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
4710 4711
	if (ret)
		goto fail;
4712

4713
	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4714
	if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
4715 4716 4717 4718 4719
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

4720
	mapping = obj->base.filp->f_mapping;
4721
	mapping_set_gfp_mask(mapping, mask);
4722
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
4723

4724
	i915_gem_object_init(obj, &i915_gem_object_ops);
4725

4726 4727
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	obj->read_domains = I915_GEM_DOMAIN_CPU;
4728

4729
	if (HAS_LLC(dev_priv))
4730
		/* On some devices, we can have the GPU use the LLC (the CPU
4731 4732 4733 4734 4735 4736 4737 4738 4739 4740 4741
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
4742 4743 4744
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;
4745

4746
	i915_gem_object_set_cache_coherency(obj, cache_level);
4747

4748 4749
	trace_i915_gem_object_create(obj);

4750
	return obj;
4751 4752 4753 4754

fail:
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
4755 4756
}
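
/*
 * Illustrative sketch only (not taken from this file): a typical in-kernel
 * user allocates a shmem-backed object and, with struct_mutex held, maps it
 * into the GGTT. Error handling is elided and the size is an assumption.
 *
 *	obj = i915_gem_object_create(i915, PAGE_SIZE);
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
 */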

4757 4758 4759 4760 4761 4762 4763 4764
static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

C
Chris Wilson 已提交
4765
	if (obj->mm.madv != I915_MADV_WILLNEED)
4766 4767 4768 4769 4770 4771 4772 4773 4774 4775 4776 4777 4778 4779 4780
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

4781 4782
static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
4783
{
4784
	struct drm_i915_gem_object *obj, *on;
4785

4786
	intel_runtime_pm_get(i915);
4787
	llist_for_each_entry_safe(obj, on, freed, freed) {
4788 4789 4790 4791
		struct i915_vma *vma, *vn;

		trace_i915_gem_object_destroy(obj);

4792 4793
		mutex_lock(&i915->drm.struct_mutex);

4794 4795 4796 4797 4798
		GEM_BUG_ON(i915_gem_object_is_active(obj));
		list_for_each_entry_safe(vma, vn,
					 &obj->vma_list, obj_link) {
			GEM_BUG_ON(i915_vma_is_active(vma));
			vma->flags &= ~I915_VMA_PIN_MASK;
4799
			i915_vma_destroy(vma);
4800
		}
4801 4802
		GEM_BUG_ON(!list_empty(&obj->vma_list));
		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
4803

4804 4805 4806 4807 4808 4809 4810 4811 4812 4813 4814 4815
		/* This serializes freeing with the shrinker. Since the free
		 * is delayed, first by RCU then by the workqueue, we want the
		 * shrinker to be able to free pages of unreferenced objects,
		 * or else we may oom whilst there are plenty of deferred
		 * freed objects.
		 */
		if (i915_gem_object_has_pages(obj)) {
			spin_lock(&i915->mm.obj_lock);
			list_del_init(&obj->mm.link);
			spin_unlock(&i915->mm.obj_lock);
		}

4816
		mutex_unlock(&i915->drm.struct_mutex);
4817 4818

		GEM_BUG_ON(obj->bind_count);
4819
		GEM_BUG_ON(obj->userfault_count);
4820
		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
4821
		GEM_BUG_ON(!list_empty(&obj->lut_list));
4822 4823 4824

		if (obj->ops->release)
			obj->ops->release(obj);
4825

4826 4827
		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
			atomic_set(&obj->mm.pages_pin_count, 0);
4828
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
4829
		GEM_BUG_ON(i915_gem_object_has_pages(obj));
4830 4831 4832 4833

		if (obj->base.import_attach)
			drm_prime_gem_destroy(&obj->base, NULL);

4834
		reservation_object_fini(&obj->__builtin_resv);
4835 4836 4837 4838 4839
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(i915, obj->base.size);

		kfree(obj->bit_17);
		i915_gem_object_free(obj);
4840

4841 4842 4843
		GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
		atomic_dec(&i915->mm.free_count);

4844 4845
		if (on)
			cond_resched();
4846
	}
4847
	intel_runtime_pm_put(i915);
4848 4849 4850 4851 4852 4853
}

static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed;

4854 4855 4856 4857 4858 4859 4860 4861 4862 4863
	/* Free the oldest, most stale object to keep the free_list short */
	freed = NULL;
	if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
		/* Only one consumer of llist_del_first() allowed */
		spin_lock(&i915->mm.free_lock);
		freed = llist_del_first(&i915->mm.free_list);
		spin_unlock(&i915->mm.free_lock);
	}
	if (unlikely(freed)) {
		freed->next = NULL;
4864
		__i915_gem_free_objects(i915, freed);
4865
	}
4866 4867 4868 4869 4870 4871 4872
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);
	struct llist_node *freed;
4873

4874 4875
	/*
	 * All file-owned VMA should have been released by this point through
4876 4877 4878 4879 4880 4881
	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * unbound now.
	 */
4882

4883
	spin_lock(&i915->mm.free_lock);
4884
	while ((freed = llist_del_all(&i915->mm.free_list))) {
4885 4886
		spin_unlock(&i915->mm.free_lock);

4887
		__i915_gem_free_objects(i915, freed);
4888
		if (need_resched())
4889 4890 4891
			return;

		spin_lock(&i915->mm.free_lock);
4892
	}
4893
	spin_unlock(&i915->mm.free_lock);
4894
}
4895

4896 4897 4898 4899 4900 4901
static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

4902 4903 4904 4905 4906 4907 4908 4909 4910
	/*
	 * Since we require blocking on struct_mutex to unbind the freed
	 * object from the GPU before releasing resources back to the
	 * system, we can not do that directly from the RCU callback (which may
	 * be a softirq context), but must instead then defer that work onto a
	 * kthread. We use the RCU callback rather than move the freed object
	 * directly onto the work queue so that we can mix between using the
	 * worker and performing frees directly from subsequent allocations for
	 * crude but effective memory throttling.
4911 4912
	 */
	if (llist_add(&obj->freed, &i915->mm.free_list))
4913
		queue_work(i915->wq, &i915->mm.free_work);
4914
}
4915

4916 4917 4918
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
C
Chris Wilson 已提交
4919

4920 4921 4922
	if (obj->mm.quirked)
		__i915_gem_object_unpin_pages(obj);

4923
	if (discard_backing_storage(obj))
C
Chris Wilson 已提交
4924
		obj->mm.madv = I915_MADV_DONTNEED;
4925

4926 4927
	/*
	 * Before we free the object, make sure any pure RCU-only
4928 4929 4930 4931
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
4932
	atomic_inc(&to_i915(obj->base.dev)->mm.free_count);
4933
	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
4934 4935
}

4936 4937 4938 4939
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

4940 4941
	if (!i915_gem_object_has_active_reference(obj) &&
	    i915_gem_object_is_active(obj))
4942 4943 4944 4945 4946
		i915_gem_object_set_active_reference(obj);
	else
		i915_gem_object_put(obj);
}

4947
static void assert_kernel_context_is_current(struct drm_i915_private *i915)
4948
{
4949
	struct i915_gem_context *kctx = i915->kernel_context;
4950 4951 4952
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

4953
	GEM_BUG_ON(i915->gt.active_requests);
4954
	for_each_engine(engine, i915, id) {
4955
		GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline.last_request));
4956
		GEM_BUG_ON(engine->last_retired_context->gem_context != kctx);
4957
	}
4958 4959
}

4960 4961
void i915_gem_sanitize(struct drm_i915_private *i915)
{
4962 4963 4964 4965 4966
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	GEM_TRACE("\n");

4967
	mutex_lock(&i915->drm.struct_mutex);
4968 4969 4970 4971 4972 4973 4974 4975 4976 4977

	intel_runtime_pm_get(i915);
	intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
	 * back to defaults, recovering from whatever wedged state we left it
	 * in and so worth trying to use the device once more.
	 */
4978
	if (i915_terminally_wedged(&i915->gpu_error))
4979 4980
		i915_gem_unset_wedged(i915);

4981 4982 4983 4984 4985 4986
	/*
	 * If we inherit context state from the BIOS or earlier occupants
	 * of the GPU, the GPU may be in an inconsistent state when we
	 * try to take over. The only way to remove the earlier state
	 * is by resetting. However, resetting on earlier gen is tricky as
	 * it may impact the display and we are uncertain about the stability
4987
	 * of the reset, so this could be applied to even earlier gen.
4988
	 */
4989 4990
	if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915))
		WARN_ON(intel_gpu_reset(i915, ALL_ENGINES));
4991

4992 4993 4994 4995 4996 4997 4998 4999 5000
	/* Reset the submission backend after resume as well as the GPU reset */
	for_each_engine(engine, i915, id) {
		if (engine->reset.reset)
			engine->reset.reset(engine, NULL);
	}

	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
	intel_runtime_pm_put(i915);

5001 5002
	i915_gem_contexts_lost(i915);
	mutex_unlock(&i915->drm.struct_mutex);
5003 5004
}

5005
int i915_gem_suspend(struct drm_i915_private *dev_priv)
5006
{
5007
	struct drm_device *dev = &dev_priv->drm;
5008
	int ret;
5009

5010 5011
	GEM_TRACE("\n");

5012
	intel_runtime_pm_get(dev_priv);
5013 5014
	intel_suspend_gt_powersave(dev_priv);

5015
	mutex_lock(&dev->struct_mutex);
5016 5017 5018 5019 5020 5021 5022 5023 5024

	/* We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the last
	 * context image is coherent, we have to switch away from it. That
	 * leaves the dev_priv->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
5025 5026 5027 5028
	if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
		ret = i915_gem_switch_to_kernel_context(dev_priv);
		if (ret)
			goto err_unlock;
5029

5030 5031
		ret = i915_gem_wait_for_idle(dev_priv,
					     I915_WAIT_INTERRUPTIBLE |
5032 5033
					     I915_WAIT_LOCKED |
					     I915_WAIT_FOR_IDLE_BOOST);
5034 5035
		if (ret && ret != -EIO)
			goto err_unlock;
5036

5037 5038
		assert_kernel_context_is_current(dev_priv);
	}
5039 5040
	mutex_unlock(&dev->struct_mutex);

5041
	intel_uc_suspend(dev_priv);
5042

5043
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
5044
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
5045 5046 5047 5048

	/* As the idle_work is rearming if it detects a race, play safe and
	 * repeat the flush until it is definitely idle.
	 */
5049
	drain_delayed_work(&dev_priv->gt.idle_work);
5050

5051 5052 5053
	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
5054
	WARN_ON(dev_priv->gt.awake);
5055 5056
	if (WARN_ON(!intel_engines_are_idle(dev_priv)))
		i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
5057

5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076
	/*
	 * Neither the BIOS, ourselves or any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */
5077
	intel_uc_sanitize(dev_priv);
5078
	i915_gem_sanitize(dev_priv);
5079 5080 5081

	intel_runtime_pm_put(dev_priv);
	return 0;
5082

5083
err_unlock:
5084
	mutex_unlock(&dev->struct_mutex);
5085
	intel_runtime_pm_put(dev_priv);
5086
	return ret;
5087 5088
}

5089
void i915_gem_resume(struct drm_i915_private *i915)
5090
{
5091 5092
	GEM_TRACE("\n");

5093
	WARN_ON(i915->gt.awake);
5094

5095 5096
	mutex_lock(&i915->drm.struct_mutex);
	intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
5097

5098 5099
	i915_gem_restore_gtt_mappings(i915);
	i915_gem_restore_fences(i915);
5100

5101 5102
	/*
	 * As we didn't flush the kernel context before suspend, we cannot
5103 5104 5105
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
5106
	i915->gt.resume(i915);
5107

5108 5109 5110
	if (i915_gem_init_hw(i915))
		goto err_wedged;

5111
	intel_uc_resume(i915);
5112

5113 5114 5115 5116 5117 5118 5119 5120 5121 5122
	/* Always reload a context for powersaving. */
	if (i915_gem_switch_to_kernel_context(i915))
		goto err_wedged;

out_unlock:
	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
	mutex_unlock(&i915->drm.struct_mutex);
	return;

err_wedged:
5123 5124 5125 5126
	if (!i915_terminally_wedged(&i915->gpu_error)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		i915_gem_set_wedged(i915);
	}
5127
	goto out_unlock;
5128 5129
}

5130
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
5131
{
5132
	if (INTEL_GEN(dev_priv) < 5 ||
5133 5134 5135 5136 5137 5138
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

5139
	if (IS_GEN5(dev_priv))
5140 5141
		return;

5142
	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
5143
	if (IS_GEN6(dev_priv))
5144
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
5145
	else if (IS_GEN7(dev_priv))
5146
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
5147
	else if (IS_GEN8(dev_priv))
B
Ben Widawsky 已提交
5148
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
5149 5150
	else
		BUG();
5151
}
D
Daniel Vetter 已提交
5152

5153
static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
5154 5155 5156 5157 5158 5159 5160
{
	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

5161
static void init_unused_rings(struct drm_i915_private *dev_priv)
5162
{
5163 5164 5165 5166 5167 5168 5169 5170 5171 5172 5173 5174
	if (IS_I830(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
		init_unused_ring(dev_priv, SRB2_BASE);
		init_unused_ring(dev_priv, SRB3_BASE);
	} else if (IS_GEN2(dev_priv)) {
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
	} else if (IS_GEN3(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, PRB2_BASE);
5175 5176 5177
	}
}

5178
static int __i915_gem_restart_engines(void *data)
5179
{
5180
	struct drm_i915_private *i915 = data;
5181
	struct intel_engine_cs *engine;
5182
	enum intel_engine_id id;
5183 5184 5185 5186
	int err;

	for_each_engine(engine, i915, id) {
		err = engine->init_hw(engine);
5187 5188 5189
		if (err) {
			DRM_ERROR("Failed to restart %s (%d)\n",
				  engine->name, err);
5190
			return err;
5191
		}
5192 5193 5194 5195 5196 5197 5198
	}

	return 0;
}

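/*
 * Bring the GT back to a known state: apply workarounds, set up
 * swizzling, PPGTT and firmware, program the MOCS tables and finally
 * restart the engines. Shared between first init and resume.
 */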
int i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
	int ret;

	dev_priv->gt.last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev_priv))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev_priv)) {
		if (IS_IVYBRIDGE(dev_priv)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_GEN(dev_priv) >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	intel_gt_workarounds_apply(dev_priv);

	i915_gem_init_swizzling(dev_priv);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev_priv);

	BUG_ON(!dev_priv->kernel_context);
	if (i915_terminally_wedged(&dev_priv->gpu_error)) {
		ret = -EIO;
		goto out;
	}

	ret = i915_ppgtt_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	ret = intel_wopcm_init_hw(&dev_priv->wopcm);
	if (ret) {
		DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init_l3cc_table(dev_priv);

	/* Only when the HW is re-initialised, can we replay the requests */
	ret = __i915_gem_restart_engines(dev_priv);
out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;
}

static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its defaults values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	ctx = i915_gem_context_create_kernel(i915, 0);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	for_each_engine(engine, i915, id) {
		struct i915_request *rq;

		rq = i915_request_alloc(engine, ctx);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_ctx;
		}

		err = 0;
		if (engine->init_context)
			err = engine->init_context(rq);

		__i915_request_add(rq, true);
		if (err)
			goto err_active;
	}

	err = i915_gem_switch_to_kernel_context(i915);
	if (err)
		goto err_active;

	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
	if (err)
		goto err_active;

	assert_kernel_context_is_current(i915);

	for_each_engine(engine, i915, id) {
		struct i915_vma *state;

		state = to_intel_context(ctx, engine)->state;
		if (!state)
			continue;

		/*
		 * As we will hold a reference to the logical state, it will
		 * not be torn down with the context, and importantly the
		 * object will hold onto its vma (making it possible for a
		 * stray GTT write to corrupt our defaults). Unmap the vma
		 * from the GTT to prevent such accidents and reclaim the
		 * space.
		 */
		err = i915_vma_unbind(state);
		if (err)
			goto err_active;

		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
		if (err)
			goto err_active;

		engine->default_state = i915_gem_object_get(state->obj);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		unsigned int found = intel_engines_has_context_isolation(i915);

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		for_each_engine(engine, i915, id) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((found & bit) != expected) {
				DRM_ERROR("mismatching default context state for class %d on engine %s\n",
					  engine->uabi_class, engine->name);
			}
		}
	}

out_ctx:
	i915_gem_context_set_closed(ctx);
	i915_gem_context_put(ctx);
	return err;

err_active:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. First try to flush any remaining
	 * request, ensure we are pointing at the kernel context and
	 * then remove it.
	 */
	if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
		goto out_ctx;

	if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
		goto out_ctx;

	i915_gem_contexts_lost(i915);
	goto out_ctx;
}

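/*
 * One-time GEM initialisation: set up userptr, WOPCM and uc support,
 * the GGTT, contexts and engines, then initialise the hardware and
 * record the default context state. An -EIO here wedges the GPU but
 * keeps KMS alive.
 */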
int i915_gem_init(struct drm_i915_private *dev_priv)
{
	int ret;

	/*
	 * We need to fall back to 4K pages since gvt gtt handling doesn't
	 * support huge page entries - we will need to check either hypervisor
	 * mm can support huge guest page or just do emulation in gvt.
	 */
	if (intel_vgpu_active(dev_priv))
		mkwrite_device_info(dev_priv)->page_sizes =
			I915_GTT_PAGE_SIZE_4K;

	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);

	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
		dev_priv->gt.resume = intel_lr_context_resume;
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
	} else {
		dev_priv->gt.resume = intel_legacy_submission_resume;
		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
	}

	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		return ret;

	ret = intel_wopcm_init(&dev_priv->wopcm);
	if (ret)
		return ret;

	ret = intel_uc_init_misc(dev_priv);
	if (ret)
		return ret;

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	ret = i915_gem_contexts_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_ggtt;
	}

	ret = intel_engines_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_context;
	}

	intel_init_gt_powersave(dev_priv);

	ret = intel_uc_init(dev_priv);
	if (ret)
		goto err_pm;

	ret = i915_gem_init_hw(dev_priv);
	if (ret)
		goto err_uc_init;

	/*
	 * Despite its name intel_init_clock_gating applies both display
	 * clock gating workarounds; GT mmio workarounds and the occasional
	 * GT power context workaround. Worse, sometimes it includes a context
	 * register workaround which we need to apply before we record the
	 * default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_init_clock_gating(dev_priv);

	ret = __intel_engines_record_defaults(dev_priv);
	if (ret)
		goto err_init_hw;

	if (i915_inject_load_failure()) {
		ret = -ENODEV;
		goto err_init_hw;
	}

	if (i915_inject_load_failure()) {
		ret = -EIO;
		goto err_init_hw;
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;

	/*
	 * Unwinding is complicated by that we want to handle -EIO to mean
	 * disable GPU submission but keep KMS alive. We want to mark the
	 * HW as irreversibly wedged, but keep enough state around that the
	 * driver doesn't explode during runtime.
	 */
err_init_hw:
	i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
	i915_gem_contexts_lost(dev_priv);
	intel_uc_fini_hw(dev_priv);
err_uc_init:
	intel_uc_fini(dev_priv);
err_pm:
	if (ret != -EIO) {
		intel_cleanup_gt_powersave(dev_priv);
		i915_gem_cleanup_engines(dev_priv);
	}
err_context:
	if (ret != -EIO)
		i915_gem_contexts_fini(dev_priv);
err_ggtt:
err_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_uc_fini_misc(dev_priv);

	if (ret != -EIO)
		i915_gem_cleanup_userptr(dev_priv);

	if (ret == -EIO) {
		/*
		 * Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for any other failure, such as an allocation failure, bail.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
			i915_gem_set_wedged(dev_priv);
		}
		ret = 0;
	}

	i915_gem_drain_freed_objects(dev_priv);
	return ret;
}

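/* GEM initialisation requiring only MMIO access: sanitize the GPU state. */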
void i915_gem_init_mmio(struct drm_i915_private *i915)
{
	i915_gem_sanitize(i915);
}

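/* Tear down every engine via the submission backend's cleanup hook. */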
void
i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		dev_priv->gt.cleanup_engine(engine);
}

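/*
 * Work out how many fence registers this platform (or the vGPU host)
 * exposes, initialise them and detect the bit-6 swizzling mode.
 */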
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	int i;

	if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_GEN(dev_priv) >= 4 ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

		fence->i915 = dev_priv;
		fence->id = i;
		list_add_tail(&fence->link, &dev_priv->mm.fence_list);
	}
	i915_gem_restore_fences(dev_priv);

	i915_gem_detect_bit_6_swizzle(dev_priv);
}

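/* Initialise the locks, lists and worker that track GEM object memory. */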
static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mm.object_stat_lock);
	spin_lock_init(&i915->mm.obj_lock);
	spin_lock_init(&i915->mm.free_lock);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.unbound_list);
	INIT_LIST_HEAD(&i915->mm.bound_list);
	INIT_LIST_HEAD(&i915->mm.fence_list);
	INIT_LIST_HEAD(&i915->mm.userfault_list);

	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

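/*
 * Allocate the slab caches and initialise the lists, workers and
 * waitqueues used by GEM, before any hardware is touched.
 */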
int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
	int err = -ENOMEM;

	dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->objects)
		goto err_out;

	dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->vmas)
		goto err_objects;

	dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!dev_priv->luts)
		goto err_vmas;

	dev_priv->requests = KMEM_CACHE(i915_request,
					SLAB_HWCACHE_ALIGN |
					SLAB_RECLAIM_ACCOUNT |
					SLAB_TYPESAFE_BY_RCU);
	if (!dev_priv->requests)
		goto err_luts;

	dev_priv->dependencies = KMEM_CACHE(i915_dependency,
					    SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT);
	if (!dev_priv->dependencies)
		goto err_requests;

	dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->priorities)
		goto err_dependencies;

	INIT_LIST_HEAD(&dev_priv->gt.timelines);
	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
	INIT_LIST_HEAD(&dev_priv->gt.closed_vma);

	i915_gem_init__mm(dev_priv);

	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

	spin_lock_init(&dev_priv->fb_tracking.lock);

	err = i915_gemfs_init(dev_priv);
	if (err)
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);

	return 0;

err_dependencies:
	kmem_cache_destroy(dev_priv->dependencies);
err_requests:
	kmem_cache_destroy(dev_priv->requests);
err_luts:
	kmem_cache_destroy(dev_priv->luts);
err_vmas:
	kmem_cache_destroy(dev_priv->vmas);
err_objects:
	kmem_cache_destroy(dev_priv->objects);
err_out:
	return err;
}

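/* Undo i915_gem_init_early(), flushing any objects still pending free. */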
void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_freed_objects(dev_priv);
	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
	WARN_ON(dev_priv->mm.object_count);
	WARN_ON(!list_empty(&dev_priv->gt.timelines));

	kmem_cache_destroy(dev_priv->priorities);
	kmem_cache_destroy(dev_priv->dependencies);
	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->luts);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();

	i915_gemfs_fini(dev_priv);
}

int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	/* Discard all purgeable objects, let userspace recover those as
	 * required after resuming.
	 */
	i915_gem_shrink_all(dev_priv);

	return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&dev_priv->mm.unbound_list,
		&dev_priv->mm.bound_list,
		NULL
	}, **p;

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well, see i915_gem_freeze()
	 */

	i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
	i915_gem_drain_freed_objects(dev_priv);

	spin_lock(&dev_priv->mm.obj_lock);
	for (p = phases; *p; p++) {
		list_for_each_entry(obj, *p, mm.link)
			__start_cpu_write(obj);
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_link)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

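/* Allocate the per-client file_priv and open a GEM context for it. */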
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = i915;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask is guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     sizeof(atomic_t) * BITS_PER_BYTE);

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
			         const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	size_t offset;
	int err;

	obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookups of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		unsigned long exception, i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		exception =
			RADIX_TREE_EXCEPTIONAL_ENTRY |
			idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i,
						(void *)exception);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radixtree will contain an exceptional entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(radix_tree_exception(sg))) {
		unsigned long base =
			(unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}

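/*
 * Switch an object from the default shmem backing (i915_gem_object_ops)
 * over to i915_gem_phys_ops. The object must be WILLNEED, not quirked,
 * unbound and not currently mapped.
 */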
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->ops != &i915_gem_object_ops)
		return -EINVAL;

	err = i915_gem_object_unbind(obj);
	if (err)
		return err;

	mutex_lock(&obj->mm.lock);

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

	pages = fetch_and_zero(&obj->mm.pages);
	if (pages) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);

		__i915_gem_object_reset_page_iter(obj);

		spin_lock(&i915->mm.obj_lock);
		list_del(&obj->mm.link);
		spin_unlock(&i915->mm.obj_lock);
	}

	obj->ops = &i915_gem_phys_ops;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_object_ops.put_pages(obj, pages);
	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	obj->ops = &i915_gem_object_ops;
	obj->mm.pages = pages;
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/scatterlist.c"
#include "selftests/mock_gem_device.c"
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif