/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_clflush.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include "i915_gemfs.h"
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_flush_free_objects(struct drm_i915_private *i915);

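/*
 * Decide whether a CPU write must be followed by a clflush: not if the
 * object is already tracked as cache_dirty, yes if its caching mode is
 * not coherent for CPU writes, and otherwise only while it is pinned
 * global (in use by the HW) and must therefore be kept flushed.
 */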
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->cache_dirty)
		return false;

	if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
		return true;

	return obj->pin_global; /* currently in use by HW, keep flushed */
}

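/*
 * Carve a temporary node of the requested size out of the low, CPU-mappable
 * range of the GGTT; the pread/pwrite slow paths use this to map the object
 * through the aperture one page at a time when it cannot be pinned whole.
 */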
static int
insert_mappable_node(struct i915_ggtt *ggtt,
                     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	might_sleep();

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_backoff(error),
					       I915_RESET_TIMEOUT);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	pinned = ggtt->base.reserved;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	drm_dma_handle_t *phys;
	struct sg_table *st;
	struct scatterlist *sg;
	char *vaddr;
	int i;
	int err;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/* Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     roundup_pow_of_two(obj->base.size),
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return -ENOMEM;

	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		err = -ENOMEM;
		goto err_phys;
	}

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		err = -ENOMEM;
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = phys->busaddr;
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;

	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_phys:
	drm_pci_free(obj->base.dev, phys);

	return err;
}

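/*
 * Reset the object into the CPU read/write domain for an upcoming CPU
 * write, flagging it cache_dirty when that write will need a clflush.
 */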
static void __start_cpu_write(struct drm_i915_gem_object *obj)
{
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	if (cpu_write_needs_clflush(obj))
		obj->cache_dirty = true;
}

static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages,
				bool needs_clflush)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if (needs_clflush &&
	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
		drm_clflush_sg(pages);

	__start_cpu_write(obj);
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages, false);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	drm_pci_free(obj->base.dev, obj->phys_handle);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

static const struct drm_i915_gem_object_ops i915_gem_object_ops;

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_set_to_cpu_domain(obj, false);
	if (ret)
		return ret;

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

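/*
 * Wait on a single fence, boosting the GPU frequency (RPS waitboost) for
 * native i915 requests on behalf of the stalled client; returns the
 * remaining timeout, or a negative error code.
 */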
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout,
			   struct intel_rps_client *rps_client)
{
	struct drm_i915_gem_request *rq;

	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
	if (i915_gem_request_completed(rq))
		goto out;

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (rps_client) {
		if (INTEL_GEN(rq->i915) >= 6)
			gen6_rps_boost(rq, rps_client);
		else
			rps_client = NULL;
	}

	timeout = i915_wait_request(rq, flags, timeout);

out:
	if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
		i915_gem_request_retire_upto(rq);

	return timeout;
}

static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout,
				 struct intel_rps_client *rps_client)
{
	unsigned int seq = __read_seqcount_begin(&resv->seq);
	struct dma_fence *excl;
	bool prune_fences = false;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout,
							     rps_client);
			if (timeout < 0)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);

		prune_fences = count && timeout >= 0;
	} else {
		excl = reservation_object_get_excl_rcu(resv);
	}

	if (excl && timeout >= 0) {
		timeout = i915_gem_object_wait_fence(excl, flags, timeout,
						     rps_client);
		prune_fences = timeout >= 0;
	}

	dma_fence_put(excl);

	/* Opportunistically prune the fences iff we know they have *all* been
	 * signaled and that the reservation object has not been changed (i.e.
	 * no new fences have been added).
	 */
	if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
		if (reservation_object_trylock(resv)) {
			if (!__read_seqcount_retry(&resv->seq, seq))
				reservation_object_add_excl_fence(resv, NULL);
			reservation_object_unlock(resv);
		}
	}

	return timeout;
}

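/* Pass a priority hint for the request to its engine scheduler, if any. */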
static void __fence_set_priority(struct dma_fence *fence, int prio)
{
	struct drm_i915_gem_request *rq;
	struct intel_engine_cs *engine;

	if (!dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;
	if (!engine->schedule)
		return;

	engine->schedule(rq, prio);
}

static void fence_set_priority(struct dma_fence *fence, int prio)
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			__fence_set_priority(array->fences[i], prio);
	} else {
		__fence_set_priority(fence, prio);
	}
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      int prio)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			fence_set_priority(shared[i], prio);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		fence_set_priority(excl, prio);
		dma_fence_put(excl);
	}
	return 0;
}

/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps_client: client (user process) to charge for any waitboosting
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout,
		     struct intel_rps_client *rps_client)
{
	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->resv,
						   flags, timeout,
						   rps_client);
	return timeout < 0 ? timeout : 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps_client;
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}

void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
{
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       args->size, &args->handle);
}

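/*
 * Render writes are not guaranteed to be visible through the CPU cache
 * unless the object is uncached (NONE) or write-through (WT); any other
 * cache level must be flagged cache_dirty after a GPU write.
 */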
static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	return !(obj->cache_level == I915_CACHE_NONE ||
		 obj->cache_level == I915_CACHE_WT);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       args->size, &args->handle);
}

static inline enum fb_op_origin
fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
{
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}

void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	intel_runtime_pm_get(dev_priv);
	spin_lock_irq(&dev_priv->uncore.lock);

	POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));

	spin_unlock_irq(&dev_priv->uncore.lock);
	intel_runtime_pm_put(dev_priv);
}

static void
flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_vma *vma;

	if (!(obj->base.write_domain & flush_domains))
		return;

	switch (obj->base.write_domain) {
	case I915_GEM_DOMAIN_GTT:
		i915_gem_flush_ggtt_writes(dev_priv);

		intel_fb_obj_flush(obj,
				   fb_write_origin(obj, I915_GEM_DOMAIN_GTT));

		for_each_ggtt_vma(vma, obj) {
			if (vma->iomap)
				continue;

			i915_vma_unset_ggtt_write(vma);
		}
		break;

	case I915_GEM_DOMAIN_CPU:
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		break;

	case I915_GEM_DOMAIN_RENDER:
		if (gpu_write_needs_clflush(obj))
			obj->cache_dirty = true;
		break;
	}

	obj->base.write_domain = 0;
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!obj->cache_dirty &&
	    !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = CLFLUSH_BEFORE;

out:
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
	    !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;
		else
			goto out;
	}

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (!obj->cache_dirty) {
		*needs_clflush |= CLFLUSH_AFTER;

		/*
		 * Same trick applies to invalidate partially written
		 * cachelines read before writing.
		 */
		if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
			*needs_clflush |= CLFLUSH_BEFORE;
	}

out:
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->mm.dirty = true;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}
}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int offset, int length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
	else
		ret = __copy_to_user(user_data, vaddr + offset, length);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

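/*
 * Fast per-page pread: copy through a temporary atomic kmap, falling back
 * to shmem_pread_slow() for bit17-swizzled pages or if the copy faults.
 */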
static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
	    bool page_do_bit17_swizzling, bool needs_clflush)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush)
			drm_clflush_virt_range(vaddr + offset, length);
		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return 0;

	return shmem_pread_slow(page, offset, length, user_data,
				page_do_bit17_swizzling, needs_clflush);
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	char __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	mutex_unlock(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pread(page, offset, length, user_data,
				  page_to_phys(page) & obj_do_bit17_swizzling,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

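/*
 * Slow pread through the GGTT: pin the object into the mappable aperture,
 * or rebind it one page at a time through a scratch node, and read back
 * the data uncached.
 */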
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
	struct i915_vma *vma;
	void __user *user_data;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	mutex_unlock(&i915->drm.struct_mutex);

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&i915->drm.struct_mutex);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source.  */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		if (!intel_runtime_pm_get_if_in_use(i915)) {
			ret = -EFAULT;
			goto out_unlock;
		}
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		intel_runtime_pm_get(i915);
	}

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!node.allocated);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	mutex_unlock(&i915->drm.struct_mutex);

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_fb_obj_flush(obj, ORIGIN_CPU);

	mutex_lock(&i915->drm.struct_mutex);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(i915);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
}

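/* Slow per-page pwrite: handles bit17 swizzling via non-atomic kmap. */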
static int
shmem_pwrite_slow(struct page *page, int offset, int length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, offset, user_data,
						length);
	else
		ret = __copy_from_user(vaddr + offset, user_data, length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool page_do_bit17_swizzling,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush_before)
			drm_clflush_virt_range(vaddr + offset, len);
		ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
		if (needs_clflush_after)
			drm_clflush_virt_range(vaddr + offset, len);

		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return ret;

	return shmem_pwrite_slow(page, offset, len, user_data,
				 page_do_bit17_swizzling,
				 needs_clflush_before,
				 needs_clflush_after);
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	void __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	mutex_unlock(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pwrite(page, offset, length, user_data,
				   page_to_phys(page) & obj_do_bit17_swizzling,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

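/*
 * Move the object's idle GGTT VMAs and the object itself to the tail of
 * their LRU lists, marking them as most recently used.
 */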
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915;
	struct list_head *list;
	struct i915_vma *vma;

	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	for_each_ggtt_vma(vma, obj) {
		if (i915_vma_is_active(vma))
			continue;

		if (!drm_mm_node_allocated(&vma->node))
			continue;

		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}

	i915 = to_i915(obj->base.dev);
	spin_lock(&i915->mm.obj_lock);
	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
	list_move_tail(&obj->mm.link, list);
	spin_unlock(&i915->mm.obj_lock);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int err;

	/* Only handle setting domains to types used by the CPU. */
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   (write_domain ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (err)
		goto out;

	/*
	 * Proxy objects do not control access to the backing storage, ergo
	 * they cannot be used as a means to manipulate the cache domain
	 * tracking for that backing storage. The proxy object is always
	 * considered to be outside of any cache domain.
	 */
	if (i915_gem_object_is_proxy(obj)) {
		err = -ENXIO;
		goto out;
	}

	/*
	 * Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out_unpin;

	if (read_domains & I915_GEM_DOMAIN_WC)
		err = i915_gem_object_set_to_wc_domain(obj, write_domain);
	else if (read_domains & I915_GEM_DOMAIN_GTT)
		err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
	else
		err = i915_gem_object_set_to_cpu_domain(obj, write_domain);

	/* And bump the LRU for this access */
	i915_gem_object_bump_inactive_ggtt(obj);

	mutex_unlock(&dev->struct_mutex);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj,
					fb_write_origin(obj, write_domain));

out_unpin:
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		i915_gem_object_put(obj);
		return -ENXIO;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem)) {
			i915_gem_object_put(obj);
			return -EINTR;
		}
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
	}
	i915_gem_object_put(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

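/* Number of pages spanned by one tile row of the object. */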
static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
1788
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
1789 1790
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * 2 - Recognise WC as a separate cache domain so that we can flush the
 *     delayed writes via GTT before performing direct access via WC.
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require a fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 2;
}
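
/*
 * A hedged userspace sketch of how the version above is typically
 * discovered, via I915_PARAM_MMAP_GTT_VERSION and the GETPARAM ioctl.
 * Assumes libdrm's drmIoctl(); the helper name is hypothetical.
 */
#if 0 /* userspace example, not built as part of the kernel */
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int query_mmap_gtt_version(int fd)
{
	int value = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_MMAP_GTT_VERSION,
		.value = &value,
	};

	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return -1;	/* very old kernel: treat as version 0 */

	return value;
}
#endif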

static inline struct i915_ggtt_view
compute_partial_view(struct drm_i915_gem_object *obj,
		     pgoff_t page_offset,
		     unsigned int chunk)
{
	struct i915_ggtt_view view;

	if (i915_gem_object_is_tiled(obj))
		chunk = roundup(chunk, tile_row_pages(obj));

	view.type = I915_GGTT_VIEW_PARTIAL;
	view.partial.offset = rounddown(page_offset, chunk);
	view.partial.size =
		min_t(unsigned int, chunk,
		      (obj->base.size >> PAGE_SHIFT) - view.partial.offset);

	/* If the partial covers the entire object, just create a normal VMA. */
	if (chunk >= obj->base.size >> PAGE_SHIFT)
		view.type = I915_GGTT_VIEW_NORMAL;

	return view;
}
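
/*
 * The partial-view arithmetic above, reduced to a standalone sketch with
 * hypothetical numbers (plain userspace C, for illustration only): a
 * 16 MiB object is 4096 pages; with a 256-page chunk, a fault at page
 * 1000 yields a view at rounddown(1000, 256) = 768 spanning 256 pages.
 */
#if 0 /* standalone illustration, not kernel code */
#include <stdio.h>

int main(void)
{
	unsigned int obj_pages = 4096;	/* 16 MiB object, 4 KiB pages */
	unsigned int chunk = 256;	/* 1 MiB chunk, as MIN_CHUNK_PAGES */
	unsigned int fault_page = 1000;	/* faulting page offset */

	unsigned int offset = fault_page / chunk * chunk; /* rounddown() */
	unsigned int size = chunk;
	if (size > obj_pages - offset)			  /* min_t() clamp */
		size = obj_pages - offset;

	printf("partial view at page %u, %u pages\n", offset, size);
	return 0;
}
#endif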

/**
 * i915_gem_fault - fault a page into the GTT
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 *
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
 */
int i915_gem_fault(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
	struct vm_area_struct *area = vmf->vma;
	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned int flags;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon acquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
		ret = -EFAULT;
		goto err_unlock;
	}

	/* If the object is smaller than a couple of partial vma, it is
	 * not worth only creating a single partial vma - we may as well
	 * clear enough space for the full object.
	 */
	flags = PIN_MAPPABLE;
	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
		flags |= PIN_NONBLOCK | PIN_NONFAULT;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
	if (IS_ERR(vma)) {
		/* Use a partial view if it is bigger than available space */
		struct i915_ggtt_view view =
			compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);

		/* Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */
		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unlock;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_unpin;

	ret = i915_vma_pin_fence(vma);
	if (ret)
		goto err_unpin;

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
			       (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->iomap);
	if (ret)
		goto err_fence;

	/* Mark as being mmapped into userspace for later revocation */
	assert_rpm_wakelock_held(dev_priv);
	if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
	GEM_BUG_ON(!obj->userfault_count);

	i915_vma_set_ggtt_write(vma);

err_fence:
	i915_vma_unpin_fence(vma);
err_unpin:
	__i915_vma_unpin(vma);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(dev_priv);
	i915_gem_object_unpin_pages(obj);
err:
	switch (ret) {
	case -EIO:
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}
	return ret;
}

static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	GEM_BUG_ON(!obj->userfault_count);

	obj->userfault_count = 0;
	list_del(&obj->userfault_link);
	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	for_each_ggtt_vma(vma, obj)
		i915_vma_unset_userfault(vma);
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	lockdep_assert_held(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (!obj->userfault_count)
		goto out;

	__i915_gem_object_release_mmap(obj);

	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

out:
	intel_runtime_pm_put(i915);
}

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.userfault_list, userfault_link)
		__i915_gem_object_release_mmap(obj);

	/* The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		/* Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int err;

	err = drm_gem_create_mmap_offset(&obj->base);
	if (likely(!err))
		return 0;

	/* Attempt to reap some mmap space from dead objects */
	do {
		err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
		if (err)
			break;

		i915_gem_drain_freed_objects(dev_priv);
		err = drm_gem_create_mmap_offset(&obj->base);
		if (!err)
			break;

	} while (flush_delayed_work(&dev_priv->gt.retire_work));

	return err;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret == 0)
		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

	i915_gem_object_put(obj);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
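
/*
 * The full GTT-mmap flow from userspace, as a hedged sketch: fetch the
 * fake offset via the ioctl above, then mmap the DRM fd at that offset.
 * Assumes libdrm's drmIoctl(); error handling is elided and the helper
 * name is hypothetical.
 */
#if 0 /* userspace example, not built as part of the kernel */
#include <stdint.h>
#include <sys/mman.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static void *gtt_mmap(int fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg))
		return MAP_FAILED;

	/* The returned offset is a lookup token, not a physical address. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, arg.offset);
}
#endif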

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
	obj->mm.pages = ERR_PTR(-EFAULT);
}

/* Try to discard unwanted pages */
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(i915_gem_object_has_pages(obj));

	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
		/* fallthrough */
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = obj->base.filp->f_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}

static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
			      struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;

	__i915_gem_object_release_shmem(obj, pages, true);

	i915_gem_gtt_finish_pages(obj, pages);

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_save_bit_17_swizzle(obj, pages);

	for_each_sgt_page(page, sgt_iter, pages) {
		if (obj->mm.dirty)
			set_page_dirty(page);

		if (obj->mm.madv == I915_MADV_WILLNEED)
			mark_page_accessed(page);

		put_page(page);
	}
	obj->mm.dirty = false;

	sg_free_table(pages);
	kfree(pages);
}

static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void __rcu **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
	rcu_read_unlock();
}

void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				 enum i915_mm_subclass subclass)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *pages;

	if (i915_gem_object_has_pinned_pages(obj))
		return;

	GEM_BUG_ON(obj->bind_count);
	if (!i915_gem_object_has_pages(obj))
		return;

	/* May be called by shrinker from within get_pages() (on another bo) */
	mutex_lock_nested(&obj->mm.lock, subclass);
	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
		goto unlock;

	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
	pages = fetch_and_zero(&obj->mm.pages);
	GEM_BUG_ON(!pages);

	spin_lock(&i915->mm.obj_lock);
	list_del(&obj->mm.link);
	spin_unlock(&i915->mm.obj_lock);

	if (obj->mm.mapping) {
		void *ptr;

		ptr = page_mask_bits(obj->mm.mapping);
		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		obj->mm.mapping = NULL;
	}

	__i915_gem_object_reset_page_iter(obj);

	if (!IS_ERR(pages))
		obj->ops->put_pages(obj, pages);

	obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;

unlock:
	mutex_unlock(&obj->mm.lock);
}

static bool i915_sg_trim(struct sg_table *orig_st)
{
	struct sg_table new_st;
	struct scatterlist *sg, *new_sg;
	unsigned int i;

	if (orig_st->nents == orig_st->orig_nents)
		return false;

	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
		return false;

	new_sg = new_st.sgl;
	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
		/* called before being DMA mapped, no need to copy sg->dma_* */
		new_sg = sg_next(new_sg);
	}
	GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */

	sg_free_table(orig_st);

	*orig_st = new_st;
	return true;
}
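
/*
 * The trim above in miniature (standalone userspace C, hypothetical
 * types and helper name): shrink an over-allocated segment array to
 * its used length, treating failure as best-effort.
 */
#if 0 /* standalone illustration, not kernel code */
#include <stdlib.h>
#include <string.h>

struct seg { void *page; unsigned int length; };

static struct seg *trim_segs(struct seg *orig, unsigned int orig_nents,
			     unsigned int nents)
{
	struct seg *new;

	if (nents == orig_nents)
		return orig;	/* nothing wasted, keep as-is */

	new = malloc(nents * sizeof(*new));
	if (!new)
		return orig;	/* trimming is opportunistic */

	memcpy(new, orig, nents * sizeof(*new));
	free(orig);
	return new;
}
#endif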

static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned long i;
	struct address_space *mapping;
	struct sg_table *st;
	struct scatterlist *sg;
	struct sgt_iter sgt_iter;
	struct page *page;
	unsigned long last_pfn = 0;	/* suppress gcc warning */
	unsigned int max_segment = i915_sg_segment_size();
	unsigned int sg_page_sizes;
	gfp_t noreclaim;
	int ret;

	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return -ENOMEM;

rebuild_st:
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
	mapping = obj->base.filp->f_mapping;
	noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	noreclaim |= __GFP_NORETRY | __GFP_NOWARN;

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;
	for (i = 0; i < page_count; i++) {
		const unsigned int shrink[] = {
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
			0,
		}, *s = shrink;
		gfp_t gfp = noreclaim;

		do {
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (likely(!IS_ERR(page)))
				break;

			if (!*s) {
				ret = PTR_ERR(page);
				goto err_sg;
			}

			i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
			cond_resched();

			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 *
			 * However, since graphics tend to be disposable,
			 * defer the oom here by reporting the ENOMEM back
			 * to userspace.
			 */
			if (!*s) {
				/* reclaim and warn, but no oom */
				gfp = mapping_gfp_mask(mapping);

				/* Our bo are always dirty and so we require
				 * kswapd to reclaim our pages (direct reclaim
				 * does not effectively begin pageout of our
				 * buffers on its own). However, direct reclaim
				 * only waits for kswapd when under allocation
				 * congestion. So as a result __GFP_RECLAIM is
				 * unreliable and fails to actually reclaim our
				 * dirty pages -- unless you try over and over
				 * again with !__GFP_NORETRY. However, we still
				 * want to fail this allocation rather than
				 * trigger the out-of-memory killer and for
				 * this we want __GFP_RETRY_MAYFAIL.
				 */
				gfp |= __GFP_RETRY_MAYFAIL;
			}
		} while (1);

		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
			if (i) {
				sg_page_sizes |= sg->length;
				sg = sg_next(sg);
			}
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
	}
	if (sg) { /* loop terminated early; short sg table */
		sg_page_sizes |= sg->length;
		sg_mark_end(sg);
	}

	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

	ret = i915_gem_gtt_prepare_pages(obj, st);
	if (ret) {
		/* DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(&dev_priv->drm.pdev->dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}

	if (i915_gem_object_needs_bit17_swizzle(obj))
		i915_gem_object_do_bit_17_swizzle(obj, st);

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err_sg:
	sg_mark_end(sg);
err_pages:
	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);
	sg_free_table(st);
	kfree(st);

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
	if (ret == -ENOSPC)
		ret = -ENOMEM;

	return ret;
}

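/*
 * The allocation ladder above, reduced to a minimal sketch. The helper
 * name is hypothetical; it only restates the gfp policy used by
 * i915_gem_object_get_pages_gtt(): try cheaply without direct reclaim
 * first, then retry allowing reclaim but still forbidding the OOM killer.
 */
#if 0 /* illustrative sketch, not built */
static struct page *
try_alloc_obj_page(struct address_space *mapping, pgoff_t n)
{
	gfp_t gfp = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
	struct page *page;

	/* First pass: opportunistic, no direct reclaim, fail silently. */
	page = shmem_read_mapping_page_gfp(mapping, n,
					   gfp | __GFP_NORETRY | __GFP_NOWARN);
	if (!IS_ERR(page))
		return page;

	/* Last resort: allow reclaim, but fail rather than invoke the OOM killer. */
	return shmem_read_mapping_page_gfp(mapping, n,
					   mapping_gfp_mask(mapping) |
					   __GFP_RETRY_MAYFAIL);
}
#endif
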
void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages,
				 unsigned int sg_page_sizes)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned long supported = INTEL_INFO(i915)->page_sizes;
	int i;

	lockdep_assert_held(&obj->mm.lock);

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;

	if (i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}

	GEM_BUG_ON(!sg_page_sizes);
	obj->mm.page_sizes.phys = sg_page_sizes;

	/*
	 * Calculate the supported page-sizes which fit into the given
	 * sg_page_sizes. This will give us the page-sizes which we may be able
	 * to use opportunistically when later inserting into the GTT. For
	 * example if phys=2G, then in theory we should be able to use 1G, 2M,
	 * 64K or 4K pages, although in practice this will depend on a number of
	 * other factors.
	 */
	obj->mm.page_sizes.sg = 0;
	for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
		if (obj->mm.page_sizes.phys & ~0u << i)
			obj->mm.page_sizes.sg |= BIT(i);
	}
	GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));

	spin_lock(&i915->mm.obj_lock);
	list_add(&obj->mm.link, &i915->mm.unbound_list);
	spin_unlock(&i915->mm.obj_lock);
}
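
/*
 * Standalone illustration of the page-size mask derivation above
 * (hypothetical values, plain userspace C): if the largest physical
 * segment is 2M + 64K, the object may opportunistically use 2M, 64K
 * or 4K GTT pages, but not 1G.
 */
#if 0 /* standalone illustration, not kernel code */
#include <stdio.h>

int main(void)
{
	unsigned long supported = (1ul << 12) | (1ul << 16) |
				  (1ul << 21) | (1ul << 30); /* 4K/64K/2M/1G */
	unsigned long phys = (1ul << 21) | (1ul << 16);	/* sg_page_sizes */
	unsigned long sg = 0;
	int i;

	for (i = 0; i <= 30; i++) {
		if (!(supported & (1ul << i)))
			continue;
		if (phys & ~0ul << i)	/* some segment is at least this big */
			sg |= 1ul << i;
	}

	printf("usable GTT page sizes: %#lx\n", sg); /* prints 0x211000 */
	return 0;
}
#endif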

static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	err = obj->ops->get_pages(obj);
	GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));

	return err;
}

/* Ensure that the associated pages are gathered from the backing storage
 * and pinned into our object. i915_gem_object_pin_pages() may be called
 * multiple times before they are released by a single call to
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	int err;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return err;

	if (unlikely(!i915_gem_object_has_pages(obj))) {
		GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;

		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);

unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}

/* The 'mapping' part of i915_gem_object_pin_map() below */
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
	struct sg_table *sgt = obj->mm.pages;
	struct sgt_iter sgt_iter;
	struct page *page;
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
	unsigned long i = 0;
	pgprot_t pgprot;
	void *addr;

	/* A single page can always be kmapped */
	if (n_pages == 1 && type == I915_MAP_WB)
		return kmap(sg_page(sgt->sgl));

	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return NULL;
	}

	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

	switch (type) {
	default:
		MISSING_CASE(type);
		/* fallthrough to use PAGE_KERNEL anyway */
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);

	if (pages != stack_pages)
		kvfree(pages);

	return addr;
}

/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
{
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
	int ret;

	if (unlikely(!i915_gem_object_has_struct_page(obj)))
		return ERR_PTR(-ENXIO);

	ret = mutex_lock_interruptible(&obj->mm.lock);
	if (ret)
		return ERR_PTR(ret);

	pinned = !(type & I915_MAP_OVERRIDE);
	type &= ~I915_MAP_OVERRIDE;

	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		if (unlikely(!i915_gem_object_has_pages(obj))) {
			GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

			ret = ____i915_gem_object_get_pages(obj);
			if (ret)
				goto err_unlock;

			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
		pinned = false;
	}
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));

	ptr = page_unpack_bits(obj->mm.mapping, &has_type);
	if (ptr && has_type != type) {
		if (pinned) {
			ret = -EBUSY;
			goto err_unpin;
		}

		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

		ptr = obj->mm.mapping = NULL;
	}

	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			ret = -ENOMEM;
			goto err_unpin;
		}

		obj->mm.mapping = page_pack_bits(ptr, type);
	}

out_unlock:
	mutex_unlock(&obj->mm.lock);
	return ptr;

err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(ret);
	goto out_unlock;
}
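
/*
 * Typical in-driver usage of the pin_map API above, as a minimal hedged
 * sketch (error handling shortened; i915_gem_object_unpin_map() is the
 * counterpart that drops the implicit pages_pin_count):
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, len);	// CPU access through the vmap
 *
 *	i915_gem_object_unpin_map(obj);
 */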

static int
i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
			   const struct drm_i915_gem_pwrite *arg)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char __user *user_data = u64_to_user_ptr(arg->data_ptr);
	u64 remain, offset;
	unsigned int pg;

	/* Before we instantiate/pin the backing store for our use, we
	 * can prepopulate the shmemfs filp efficiently using a write into
	 * the pagecache. We avoid the penalty of instantiating all the
	 * pages, important if the user is just writing to a few and never
	 * uses the object on the GPU, and using a direct write into shmemfs
	 * allows it to avoid the cost of retrieving a page (either swapin
	 * or clearing-before-use) before it is overwritten.
	 */
	if (i915_gem_object_has_pages(obj))
		return -ENODEV;

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	/* Before the pages are instantiated the object is treated as being
	 * in the CPU domain. The pages will be clflushed as required before
	 * use, and we can freely write into the pages directly. If userspace
	 * races pwrite with any other operation, corruption will ensue -
	 * that is userspace's prerogative!
	 */

	remain = arg->size;
	offset = arg->offset;
	pg = offset_in_page(offset);

	do {
		unsigned int len, unwritten;
		struct page *page;
		void *data, *vaddr;
		int err;

		len = PAGE_SIZE - pg;
		if (len > remain)
			len = remain;

		err = pagecache_write_begin(obj->base.filp, mapping,
					    offset, len, 0,
					    &page, &data);
		if (err < 0)
			return err;

		vaddr = kmap(page);
		unwritten = copy_from_user(vaddr + pg, user_data, len);
		kunmap(page);

		err = pagecache_write_end(obj->base.filp, mapping,
					  offset, len, len - unwritten,
					  page, data);
		if (err < 0)
			return err;

		if (unwritten)
			return -EFAULT;

		remain -= len;
		user_data += len;
		offset += len;
		pg = 0;
	} while (remain);

	return 0;
}
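
/*
 * Userspace view of the path above: DRM_IOCTL_I915_GEM_PWRITE into a
 * freshly created (pageless) buffer lands in the pagecache fast path.
 * A hedged sketch assuming libdrm's drmIoctl(); the helper name is
 * hypothetical and error handling is left to the caller.
 */
#if 0 /* userspace example, not built as part of the kernel */
#include <stdint.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

static int gem_pwrite(int fd, uint32_t handle, uint64_t offset,
		      const void *data, uint64_t size)
{
	struct drm_i915_gem_pwrite arg = {
		.handle = handle,
		.offset = offset,
		.size = size,
		.data_ptr = (uintptr_t)data,
	};

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &arg);
}
#endif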

static bool ban_context(const struct i915_gem_context *ctx,
			unsigned int score)
{
	return (i915_gem_context_is_bannable(ctx) &&
		score >= CONTEXT_SCORE_BAN_THRESHOLD);
}

static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
{
	unsigned int score;
	bool banned;

	atomic_inc(&ctx->guilty_count);

	score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
	banned = ban_context(ctx, score);
	DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
			 ctx->name, score, yesno(banned));
	if (!banned)
		return;

	i915_gem_context_set_banned(ctx);
	if (!IS_ERR_OR_NULL(ctx->file_priv)) {
		atomic_inc(&ctx->file_priv->context_bans);
		DRM_DEBUG_DRIVER("client %s has had %d contexts banned\n",
				 ctx->name, atomic_read(&ctx->file_priv->context_bans));
	}
}

static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
{
	atomic_inc(&ctx->active_count);
}

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request, *active = NULL;
	unsigned long flags;

	/* We are called by the error capture and reset at a random
	 * point in time. In particular, note that neither is crucially
	 * ordered with an interrupt. After a hang, the GPU is dead and we
	 * assume that no more writes can happen (we waited long enough for
	 * all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 */
	spin_lock_irqsave(&engine->timeline->lock, flags);
	list_for_each_entry(request, &engine->timeline->requests, link) {
		if (__i915_gem_request_completed(request,
						 request->global_seqno))
			continue;

		GEM_BUG_ON(request->engine != engine);
		GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
				    &request->fence.flags));

		active = request;
		break;
	}
	spin_unlock_irqrestore(&engine->timeline->lock, flags);

	return active;
}

static bool engine_stalled(struct intel_engine_cs *engine)
{
	if (!engine->hangcheck.stalled)
		return false;

	/* Check for possible seqno movement after hang declaration */
	if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
		DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
		return false;
	}

	return true;
}

/*
 * Ensure the irq handler finishes, and is not run again.
 * Also return the active request so that we only search for it once.
 */
struct drm_i915_gem_request *
i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
{
	struct drm_i915_gem_request *request = NULL;

	/*
	 * During the reset sequence, we must prevent the engine from
	 * entering RC6. As the context state is undefined until we restart
	 * the engine, if it does enter RC6 during the reset, the state
	 * written to the powercontext is undefined and so we may lose
	 * GPU state upon resume, i.e. fail to restart after a reset.
	 */
	intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);

	/*
	 * Prevent the signaler thread from updating the request
	 * state (by calling dma_fence_signal) as we are processing
	 * the reset. The write from the GPU of the seqno is
	 * asynchronous and the signaler thread may see a different
	 * value to us and declare the request complete, even though
	 * the reset routine has picked that request as the active
	 * (incomplete) request. This conflict is not handled
	 * gracefully!
	 */
	kthread_park(engine->breadcrumbs.signaler);

	/*
	 * Prevent request submission to the hardware until we have
	 * completed the reset in i915_gem_reset_finish(). If a request
	 * is completed by one engine, it may then queue a request
	 * to a second via its execlists->tasklet *just* as we are
	 * calling engine->init_hw() and also writing the ELSP.
	 * Turning off the execlists->tasklet until the reset is over
	 * prevents the race.
	 */
	tasklet_kill(&engine->execlists.tasklet);
	tasklet_disable(&engine->execlists.tasklet);

	/*
	 * We're using a worker to queue preemption requests from the tasklet
	 * in GuC submission mode.
	 * Even though the tasklet was disabled, we may still have a worker
	 * queued. Let's make sure that all workers scheduled before disabling
	 * the tasklet are completed before continuing with the reset.
	 */
	if (engine->i915->guc.preempt_wq)
		flush_workqueue(engine->i915->guc.preempt_wq);

	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);

	request = i915_gem_find_active_request(engine);
	if (request && request->fence.error == -EIO)
		request = ERR_PTR(-EIO); /* Previous reset failed! */

	return request;
}

int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	struct drm_i915_gem_request *request;
	enum intel_engine_id id;
	int err = 0;

	for_each_engine(engine, dev_priv, id) {
		request = i915_gem_reset_prepare_engine(engine);
		if (IS_ERR(request)) {
			err = PTR_ERR(request);
			continue;
		}

		engine->hangcheck.active_request = request;
	}

	i915_gem_revoke_fences(dev_priv);

	return err;
}

static void skip_request(struct drm_i915_gem_request *request)
{
	void *vaddr = request->ring->vaddr;
	u32 head;

	/* As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = request->head;
	if (request->postfix < head) {
		memset(vaddr + head, 0, request->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, request->postfix - head);

	dma_fence_set_error(&request->fence, -EIO);
}

static void engine_skip_context(struct drm_i915_gem_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct i915_gem_context *hung_ctx = request->ctx;
	struct intel_timeline *timeline;
	unsigned long flags;

	timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);

	spin_lock_irqsave(&engine->timeline->lock, flags);
	spin_lock(&timeline->lock);

	list_for_each_entry_continue(request, &engine->timeline->requests, link)
		if (request->ctx == hung_ctx)
			skip_request(request);

	list_for_each_entry(request, &timeline->requests, link)
		skip_request(request);

	spin_unlock(&timeline->lock);
	spin_unlock_irqrestore(&engine->timeline->lock, flags);
}

/* Returns the request if it was guilty of the hang */
static struct drm_i915_gem_request *
i915_gem_reset_request(struct intel_engine_cs *engine,
		       struct drm_i915_gem_request *request)
{
	/* The guilty request will get skipped on a hung engine.
	 *
	 * Users of client default contexts do not rely on logical
	 * state preserved between batches so it is safe to execute
	 * queued requests following the hang. Non default contexts
	 * rely on preserved state, so skipping a batch loses the
	 * evolution of the state and it needs to be considered corrupted.
	 * Executing more queued batches on top of corrupted state is
	 * risky. But we take the risk by trying to advance through
	 * the queued requests in order to make the client behaviour
	 * more predictable around resets, by not throwing away random
	 * amount of batches it has prepared for execution. Sophisticated
	 * clients can use gem_reset_stats_ioctl and dma fence status
	 * (exported via sync_file info ioctl on explicit fences) to observe
	 * when it loses the context state and should rebuild accordingly.
	 *
	 * The context ban, and ultimately the client ban, mechanism are safety
	 * valves if client submission ends up resulting in nothing more than
	 * subsequent hangs.
	 */

	if (engine_stalled(engine)) {
		i915_gem_context_mark_guilty(request->ctx);
		skip_request(request);

		/* If this context is now banned, skip all pending requests. */
		if (i915_gem_context_is_banned(request->ctx))
			engine_skip_context(request);
	} else {
		/*
		 * Since this is not the hung engine, it may have advanced
		 * since the hang declaration. Double check by refinding
		 * the active request at the time of the reset.
		 */
		request = i915_gem_find_active_request(engine);
		if (request) {
			i915_gem_context_mark_innocent(request->ctx);
			dma_fence_set_error(&request->fence, -EAGAIN);

			/* Rewind the engine to replay the incomplete rq */
			spin_lock_irq(&engine->timeline->lock);
			request = list_prev_entry(request, link);
			if (&request->link == &engine->timeline->requests)
				request = NULL;
			spin_unlock_irq(&engine->timeline->lock);
		}
	}

	return request;
}

void i915_gem_reset_engine(struct intel_engine_cs *engine,
			   struct drm_i915_gem_request *request)
{
	engine->irq_posted = 0;

	if (request)
		request = i915_gem_reset_request(engine, request);

	if (request) {
		DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
				 engine->name, request->global_seqno);
	}

	/* Setup the CS to resume from the breadcrumb of the hung request */
	engine->reset_hw(engine, request);
}

void i915_gem_reset(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	i915_gem_retire_requests(dev_priv);

	for_each_engine(engine, dev_priv, id) {
		struct i915_gem_context *ctx;

		i915_gem_reset_engine(engine, engine->hangcheck.active_request);
		ctx = fetch_and_zero(&engine->last_retired_context);
		if (ctx)
			engine->context_unpin(engine, ctx);
	}

	i915_gem_restore_fences(dev_priv);

	if (dev_priv->gt.awake) {
		intel_sanitize_gt_powersave(dev_priv);
		intel_enable_gt_powersave(dev_priv);
		if (INTEL_GEN(dev_priv) >= 6)
			gen6_rps_busy(dev_priv);
	}
}

void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
{
	tasklet_enable(&engine->execlists.tasklet);
	kthread_unpark(engine->breadcrumbs.signaler);

	intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
}

void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for_each_engine(engine, dev_priv, id) {
		engine->hangcheck.active_request = NULL;
		i915_gem_reset_finish_engine(engine);
	}
}

static void nop_submit_request(struct drm_i915_gem_request *request)
{
	dma_fence_set_error(&request->fence, -EIO);

	i915_gem_request_submit(request);
}

static void nop_complete_submit_request(struct drm_i915_gem_request *request)
{
	unsigned long flags;

	dma_fence_set_error(&request->fence, -EIO);

	spin_lock_irqsave(&request->engine->timeline->lock, flags);
	__i915_gem_request_submit(request);
	intel_engine_init_global_seqno(request->engine, request->global_seqno);
	spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
}

void i915_gem_set_wedged(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	/*
	 * First, stop submission to hw, but do not yet complete requests by
	 * rolling the global seqno forward (since this would complete requests
	 * for which we haven't set the fence error to EIO yet).
	 */
	for_each_engine(engine, i915, id)
		engine->submit_request = nop_submit_request;

	/*
	 * Make sure no one is running the old callback before we proceed with
	 * cancelling requests and resetting the completion tracking. Otherwise
	 * we might submit a request to the hardware which never completes.
	 */
	synchronize_rcu();

	for_each_engine(engine, i915, id) {
		/* Mark all executing requests as skipped */
		engine->cancel_requests(engine);

		/*
		 * Only once we've force-cancelled all in-flight requests can we
		 * start to complete all requests.
		 */
		engine->submit_request = nop_complete_submit_request;
	}

	/*
	 * Make sure no request can slip through without getting completed by
	 * either this call here to intel_engine_init_global_seqno, or the one
	 * in nop_complete_submit_request.
	 */
	synchronize_rcu();

	for_each_engine(engine, i915, id) {
		unsigned long flags;

		/* Mark all pending requests as complete so that any concurrent
		 * (lockless) lookup doesn't try and wait upon the request as we
		 * reset it.
		 */
		spin_lock_irqsave(&engine->timeline->lock, flags);
		intel_engine_init_global_seqno(engine,
					       intel_engine_last_submit(engine));
		spin_unlock_irqrestore(&engine->timeline->lock, flags);
	}

	set_bit(I915_WEDGED, &i915->gpu_error.flags);
	wake_up_all(&i915->gpu_error.reset_queue);
}

bool i915_gem_unset_wedged(struct drm_i915_private *i915)
{
	struct i915_gem_timeline *tl;
	int i;

	lockdep_assert_held(&i915->drm.struct_mutex);
	if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
		return true;

	/* Before unwedging, make sure that all pending operations
	 * are flushed and errored out - we may have requests waiting upon
	 * third party fences. We marked all inflight requests as EIO, and
	 * every execbuf since returned EIO, for consistency we want all
	 * the currently pending requests to also be marked as EIO, which
	 * is done inside our nop_submit_request - and so we must wait.
	 *
	 * No more can be submitted until we reset the wedged bit.
	 */
	list_for_each_entry(tl, &i915->gt.timelines, link) {
		for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
			struct drm_i915_gem_request *rq;

			rq = i915_gem_active_peek(&tl->engine[i].last_request,
						  &i915->drm.struct_mutex);
			if (!rq)
				continue;

			/* We can't use our normal waiter as we want to
			 * avoid recursively trying to handle the current
			 * reset. The basic dma_fence_default_wait() installs
			 * a callback for dma_fence_signal(), which is
			 * triggered by our nop handler (indirectly, the
			 * callback enables the signaler thread which is
			 * woken by the nop_submit_request() advancing the seqno
			 * and when the seqno passes the fence, the signaler
			 * then signals the fence waking us up).
			 */
			if (dma_fence_default_wait(&rq->fence, true,
						   MAX_SCHEDULE_TIMEOUT) < 0)
				return false;
		}
	}

	/* Undo nop_submit_request. We prevent all new i915 requests from
	 * being queued (by disallowing execbuf whilst wedged) so having
	 * waited for all active requests above, we know the system is idle
	 * and do not have to worry about a thread being inside
	 * engine->submit_request() as we swap over. So unlike installing
	 * the nop_submit_request on reset, we can do this from normal
	 * context and do not require stop_machine().
	 */
	intel_engines_reset_default_submission(i915);
	i915_gem_contexts_lost(i915);

	smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
	clear_bit(I915_WEDGED, &i915->gpu_error.flags);

	return true;
}

static void
i915_gem_retire_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.retire_work.work);
	struct drm_device *dev = &dev_priv->drm;

	/* Come back later if the device is busy... */
	if (mutex_trylock(&dev->struct_mutex)) {
		i915_gem_retire_requests(dev_priv);
		mutex_unlock(&dev->struct_mutex);
	}

	/* Keep the retire handler running until we are finally idle.
	 * We do not need to do this test under locking as in the worst-case
	 * we queue the retire worker once too often.
	 */
	if (READ_ONCE(dev_priv->gt.awake)) {
		i915_queue_hangcheck(dev_priv);
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.retire_work,
				   round_jiffies_up_relative(HZ));
	}
}

static inline bool
new_requests_since_last_retire(const struct drm_i915_private *i915)
{
	return (READ_ONCE(i915->gt.active_requests) ||
		work_pending(&i915->gt.idle_work.work));
}

static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), gt.idle_work.work);
	bool rearm_hangcheck;
	ktime_t end;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

	/*
	 * Wait for last execlists context complete, but bail out in case a
	 * new request is submitted.
	 */
	end = ktime_add_ms(ktime_get(), I915_IDLE_ENGINES_TIMEOUT);
	do {
		if (new_requests_since_last_retire(dev_priv))
			return;

		if (intel_engines_are_idle(dev_priv))
			break;

		usleep_range(100, 500);
	} while (ktime_before(ktime_get(), end));

	rearm_hangcheck =
		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
		/* Currently busy, come back later */
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->gt.idle_work,
				 msecs_to_jiffies(50));
		goto out_rearm;
	}

	/*
	 * New request retired after this work handler started, extend active
	 * period until next instance of the work.
	 */
	if (new_requests_since_last_retire(dev_priv))
		goto out_unlock;

	/*
	 * Be paranoid and flush a concurrent interrupt to make sure
	 * we don't reactivate any irq tasklets after parking.
	 *
	 * FIXME: Note that even though we have waited for execlists to be idle,
	 * there may still be an in-flight interrupt even though the CSB
	 * is now empty. synchronize_irq() makes sure that a residual interrupt
	 * is completed before we continue, but it doesn't prevent the HW from
	 * raising a spurious interrupt later. To complete the shield we should
	 * coordinate disabling the CS irq with flushing the interrupts.
	 */
	synchronize_irq(dev_priv->drm.irq);

	intel_engines_park(dev_priv);
	i915_gem_timelines_park(dev_priv);

	i915_pmu_gt_parked(dev_priv);

	GEM_BUG_ON(!dev_priv->gt.awake);
	dev_priv->gt.awake = false;
	rearm_hangcheck = false;

	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_idle(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);

	intel_runtime_pm_put(dev_priv);
out_unlock:
	mutex_unlock(&dev_priv->drm.struct_mutex);

out_rearm:
	if (rearm_hangcheck) {
		GEM_BUG_ON(!dev_priv->gt.awake);
		i915_queue_hangcheck(dev_priv);
	}
}

void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(gem->dev);
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_lut_handle *lut, *ln;

	mutex_lock(&i915->drm.struct_mutex);

	list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
		struct i915_gem_context *ctx = lut->ctx;
		struct i915_vma *vma;

		GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
		if (ctx->file_priv != fpriv)
			continue;

		vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
		GEM_BUG_ON(vma->obj != obj);

		/* We allow the process to have multiple handles to the same
		 * vma, in the same fd namespace, by virtue of flink/open.
		 */
		GEM_BUG_ON(!vma->open_count);
		if (!--vma->open_count && !i915_vma_is_ggtt(vma))
			i915_vma_close(vma);

		list_del(&lut->obj_link);
		list_del(&lut->ctx_link);

		kmem_cache_free(i915->luts, lut);
		__i915_gem_object_release_unless_active(obj);
	}

	mutex_unlock(&i915->drm.struct_mutex);
}

static unsigned long to_wait_timeout(s64 timeout_ns)
{
	if (timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;

	if (timeout_ns == 0)
		return 0;

	return nsecs_to_jiffies_timeout(timeout_ns);
}

3451 3452
/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
3453 3454 3455
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
3456 3457 3458 3459 3460 3461 3462
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENONENT: object doesn't exist
 * Also possible, but rare:
3463
 *  -EAGAIN: incomplete, restart syscall
3464 3465 3466 3467 3468 3469 3470 3471 3472 3473 3474 3475 3476 3477 3478 3479
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter * race condition exists in the busy
 * ioctl
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	ktime_t start;
	long ret;

	if (args->flags != 0)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->bo_handle);
	if (!obj)
		return -ENOENT;

	start = ktime_get();

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
				   to_wait_timeout(args->timeout_ns),
				   to_rps_client(file));

	if (args->timeout_ns > 0) {
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;

		/*
		 * Apparently ktime isn't accurate enough and occasionally has a
		 * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
		 * things up to make the test happy. We allow up to 1 jiffy.
		 *
		 * This is a regression from the timespec->ktime conversion.
		 */
		if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
			args->timeout_ns = 0;

		/* Asked to wait beyond the jiffie/scheduler precision? */
		if (ret == -ETIME && args->timeout_ns)
			ret = -EAGAIN;
	}

	i915_gem_object_put(obj);
	return ret;
}
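
/*
 * A minimal userspace sketch of driving this ioctl (illustrative only;
 * assumes an open DRM fd, a valid GEM handle and libdrm's drmIoctl()):
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 100 * 1000 * 1000, // 100ms
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait) && errno == ETIME)
 *		; // object still busy, wait.timeout_ns holds the remainder
 */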

static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
{
	int ret, i;

	for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
		ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
		if (ret)
			return ret;
	}

	return 0;
}

static int wait_for_engines(struct drm_i915_private *i915)
{
	if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
		dev_err(i915->drm.dev,
			"Failed to idle engines, declaring wedged!\n");
		if (drm_debug & DRM_UT_DRIVER) {
			struct drm_printer p = drm_debug_printer(__func__);
			struct intel_engine_cs *engine;
			enum intel_engine_id id;

			for_each_engine(engine, i915, id)
				intel_engine_dump(engine, &p,
						  "%s", engine->name);
		}

		i915_gem_set_wedged(i915);
		return -EIO;
	}

	return 0;
}

int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
	int ret;

	/* If the device is asleep, we have no requests outstanding */
	if (!READ_ONCE(i915->gt.awake))
		return 0;

	if (flags & I915_WAIT_LOCKED) {
		struct i915_gem_timeline *tl;

		lockdep_assert_held(&i915->drm.struct_mutex);

		list_for_each_entry(tl, &i915->gt.timelines, link) {
			ret = wait_for_timeline(tl, flags);
			if (ret)
				return ret;
		}

		i915_gem_retire_requests(i915);

		ret = wait_for_engines(i915);
	} else {
		ret = wait_for_timeline(&i915->gt.global_timeline, flags);
	}

	return ret;
}
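
/*
 * Callers typically quiesce the whole GPU under the struct_mutex, e.g.
 * (a sketch mirroring the suspend path later in this file):
 *
 *	mutex_lock(&i915->drm.struct_mutex);
 *	err = i915_gem_wait_for_idle(i915,
 *				     I915_WAIT_INTERRUPTIBLE |
 *				     I915_WAIT_LOCKED);
 *	mutex_unlock(&i915->drm.struct_mutex);
 */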

static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
{
	/*
	 * We manually flush the CPU domain so that we can override and
	 * force the flush for the display, and perform it asynchronously.
	 */
	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
	if (obj->cache_dirty)
		i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
	obj->base.write_domain = 0;
}

void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
{
	if (!READ_ONCE(obj->pin_global))
		return;

	mutex_lock(&obj->base.dev->struct_mutex);
	__i915_gem_object_flush_for_display(obj);
	mutex_unlock(&obj->base.dev->struct_mutex);
}

/**
 * Moves a single object to the WC read, and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
		return 0;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * WC domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
		mb();

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_WC;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_WC;
		obj->base.write_domain = I915_GEM_DOMAIN_WC;
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);
	return 0;
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);

	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
		obj->mm.dirty = true;
	}

	i915_gem_object_unpin_pages(obj);
	return 0;
}

/**
 * Changes the cache-level of an object across all VMA.
 * @obj: object to act on
 * @cache_level: new cache level to set for the object
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (obj->cache_level == cache_level)
		return 0;

	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
restart:
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!drm_mm_node_allocated(&vma->node))
			continue;

		if (i915_vma_is_pinned(vma)) {
			DRM_DEBUG("cannot change the cache level of pinned objects\n");
			return -EBUSY;
		}

		if (!i915_vma_is_closed(vma) &&
		    i915_gem_valid_gtt_space(vma, cache_level))
			continue;

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;

		/* As unbinding may affect other elements in the
		 * obj->vma_list (due to side-effects from retiring
		 * an active vma), play safe and restart the iterator.
		 */
		goto restart;
	}

	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However since
	 * we already have a valid slot, dma mapping, pages etc, we may as
	 * well rewrite the PTE in the belief that doing so tramples upon
	 * less state and so involves less work.
	 */
	if (obj->bind_count) {
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
		 */
		ret = i915_gem_object_wait(obj,
					   I915_WAIT_INTERRUPTIBLE |
					   I915_WAIT_LOCKED |
					   I915_WAIT_ALL,
					   MAX_SCHEDULE_TIMEOUT,
					   NULL);
		if (ret)
			return ret;

		if (!HAS_LLC(to_i915(obj->base.dev)) &&
		    cache_level != I915_CACHE_NONE) {
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmapping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
			i915_gem_release_mmap(obj);

			/* As we no longer need a fence for GTT access,
			 * we can relinquish it now (and so prevent having
			 * to steal a fence from someone else on the next
			 * fence request). Note GPU activity would have
			 * dropped the fence as all snoopable access is
			 * supposed to be linear.
			 */
			for_each_ggtt_vma(vma, obj) {
				ret = i915_vma_put_fence(vma);
				if (ret)
					return ret;
			}
		} else {
			/* We either have incoherent backing store and
			 * so no GTT access or the architecture is fully
			 * coherent. In such cases, existing GTT mmaps
			 * ignore the cache bit in the PTE and we can
			 * rewrite it without confusing the GPU or having
			 * to force userspace to fault back in its mmaps.
			 */
		}

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
			if (ret)
				return ret;
		}
	}

	list_for_each_entry(vma, &obj->vma_list, obj_link)
		vma->node.color = cache_level;
	i915_gem_object_set_cache_coherency(obj, cache_level);
	obj->cache_dirty = true; /* Always invalidate stale cachelines */

	return 0;
}

int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	int err = 0;

	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj) {
		err = -ENOENT;
		goto out;
	}

	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

	default:
		args->caching = I915_CACHING_NONE;
		break;
	}
out:
	rcu_read_unlock();
	return err;
}

int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_caching *args = data;
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret = 0;

	switch (args->caching) {
	case I915_CACHING_NONE:
		level = I915_CACHE_NONE;
		break;
	case I915_CACHING_CACHED:
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
		if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
			return -ENODEV;

		level = I915_CACHE_LLC;
		break;
	case I915_CACHING_DISPLAY:
		level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
		break;
	default:
		return -EINVAL;
	}

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * The caching mode of proxy object is handled by its generator, and
	 * not allowed to be changed by userspace.
	 */
	if (i915_gem_object_is_proxy(obj)) {
		ret = -ENXIO;
		goto out;
	}

	if (obj->cache_level == level)
		goto out;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		goto out;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto out;

	ret = i915_gem_object_set_cache_level(obj, level);
	mutex_unlock(&dev->struct_mutex);

out:
	i915_gem_object_put(obj);
	return ret;
}
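
/*
 * A userspace sketch of the set-caching uABI (illustrative; fd and
 * handle are assumed to come from the caller):
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	ret = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */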

/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
struct i915_vma *
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
				     const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Mark the global pin early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
	obj->pin_global++;

	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
	ret = i915_gem_object_set_cache_level(obj,
					      HAS_WT(to_i915(obj->base.dev)) ?
					      I915_CACHE_WT : I915_CACHE_NONE);
	if (ret) {
		vma = ERR_PTR(ret);
		goto err_unpin_global;
	}

	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
	 * always use map_and_fenceable for all scanout buffers. However,
	 * it may simply be too big to fit into mappable, in which case
	 * put it anyway and hope that userspace can cope (but always first
	 * try to preserve the existing ABI).
	 */
	vma = ERR_PTR(-ENOSPC);
	if (!view || view->type == I915_GGTT_VIEW_NORMAL)
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
					       PIN_MAPPABLE | PIN_NONBLOCK);
	if (IS_ERR(vma)) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		unsigned int flags;

		/* Valleyview is definitely limited to scanning out the first
		 * 512MiB. Let's presume this behaviour was inherited from the
		 * g4x display engine and that all earlier gen are similarly
		 * limited. Testing suggests that it is a little more
		 * complicated than this. For example, Cherryview appears quite
		 * happy to scanout from anywhere within its global aperture.
		 */
		flags = 0;
		if (HAS_GMCH_DISPLAY(i915))
			flags = PIN_MAPPABLE;
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
	}
	if (IS_ERR(vma))
		goto err_unpin_global;

	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

	/* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
	__i915_gem_object_flush_for_display(obj);
	intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	return vma;

err_unpin_global:
	obj->pin_global--;
	return vma;
}

void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (WARN_ON(vma->obj->pin_global == 0))
		return;

	if (--vma->obj->pin_global == 0)
		vma->display_alignment = I915_GTT_MIN_ALIGNMENT;

	/* Bump the LRU to try and avoid premature eviction whilst flipping */
	i915_gem_object_bump_inactive_ggtt(vma->obj);

	i915_vma_unpin(vma);
}
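
/*
 * The pin/unpin pair above is used by modesetting roughly as follows
 * (a sketch; error handling and the framebuffer view are elided):
 *
 *	vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	... scanout from vma ...
 *	i915_gem_object_unpin_from_display_plane(vma);
 */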

/**
 * Moves a single object to the CPU read, and possibly write domain.
 * @obj: object to act on
 * @write: requesting write or read-only access
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write)
		__start_cpu_write(obj);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct drm_i915_gem_request *request, *target = NULL;
	long ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		if (target) {
			list_del(&target->client_link);
			target->file_priv = NULL;
		}

		target = request;
	}
	if (target)
		i915_gem_request_get(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = i915_wait_request(target,
				I915_WAIT_INTERRUPTIBLE,
				MAX_SCHEDULE_TIMEOUT);
	i915_gem_request_put(target);

	return ret < 0 ? ret : 0;
}

struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!view && flags & PIN_MAPPABLE) {
		/* If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > dev_priv->ggtt.mappable_end)
			return ERR_PTR(-E2BIG);

		/* If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to do a search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

	vma = i915_vma_instance(obj, vm, view);
	if (unlikely(IS_ERR(vma)))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			if (flags & PIN_MAPPABLE &&
			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}
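
/*
 * Typical in-kernel usage of the helper above (a sketch; a NULL view
 * requests the normal GGTT mapping and error handling is elided):
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	... access the object through the mappable aperture ...
 *	i915_vma_unpin(vma);
 */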

static __always_inline unsigned int __busy_read_flag(unsigned int id)
{
	/* Note that we could alias engines in the execbuf API, but
	 * that would be very unwise as it prevents userspace from
	 * fine control over engine selection. Ahem.
	 *
	 * This should be something like EXEC_MAX_ENGINE instead of
	 * I915_NUM_ENGINES.
	 */
	BUILD_BUG_ON(I915_NUM_ENGINES > 16);
	return 0x10000 << id;
}

static __always_inline unsigned int __busy_write_id(unsigned int id)
{
	/* The uABI guarantees an active writer is also amongst the read
	 * engines. This would be true if we accessed the activity tracking
	 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we can not guarantee that the last_write
	 * being active implies that we have set the same engine flag from
	 * last_read - hence we always set both read and write busy for
	 * last_write.
	 */
	return id | __busy_read_flag(id);
}
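
/*
 * Worked example of the resulting uABI encoding: an object last written
 * by the engine with uabi_id 2 and also still read by uabi_id 0 reports
 *
 *	args->busy = __busy_write_id(2) | __busy_read_flag(0)
 *		   = (2 | 0x10000 << 2) | (0x10000 << 0)
 *		   = 0x00050002
 *
 * i.e. the reader flags live in the high 16 bits and the single
 * writer's id in the low 16 bits.
 */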

static __always_inline unsigned int
__busy_set_if_active(const struct dma_fence *fence,
		     unsigned int (*flag)(unsigned int id))
{
	struct drm_i915_gem_request *rq;

	/* We have to check the current hw status of the fence as the uABI
	 * guarantees forward progress. We could rely on the idle worker
	 * to eventually flush us, but to minimise latency just ask the
	 * hardware.
	 *
	 * Note we only report on the status of native fences.
	 */
	if (!dma_fence_is_i915(fence))
		return 0;

	/* opencode to_request() in order to avoid const warnings */
	rq = container_of(fence, struct drm_i915_gem_request, fence);
	if (i915_gem_request_completed(rq))
		return 0;

	return flag(rq->engine->uabi_id);
}

static __always_inline unsigned int
busy_check_reader(const struct dma_fence *fence)
{
	return __busy_set_if_active(fence, __busy_read_flag);
}

static __always_inline unsigned int
busy_check_writer(const struct dma_fence *fence)
{
	if (!fence)
		return 0;

	return __busy_set_if_active(fence, __busy_write_id);
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	struct reservation_object_list *list;
	unsigned int seq;
	int err;

	err = -ENOENT;
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj)
		goto out;

	/* A discrepancy here is that we do not report the status of
	 * non-i915 fences, i.e. even though we may report the object as idle,
	 * a call to set-domain may still stall waiting for foreign rendering.
	 * This also means that wait-ioctl may report an object as busy,
	 * where busy-ioctl considers it idle.
	 *
	 * We trade the ability to warn of foreign fences to report on which
	 * i915 engines are active for the object.
	 *
	 * Alternatively, we can trade that extra information on read/write
	 * activity with
	 *	args->busy =
	 *		!reservation_object_test_signaled_rcu(obj->resv, true);
	 * to report the overall busyness. This is what the wait-ioctl does.
	 *
	 */
retry:
	seq = raw_read_seqcount(&obj->resv->seq);

	/* Translate the exclusive fence to the READ *and* WRITE engine */
	args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));

	/* Translate shared fences to READ set of engines */
	list = rcu_dereference(obj->resv->fence);
	if (list) {
		unsigned int shared_count = list->shared_count, i;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence =
				rcu_dereference(list->shared[i]);

			args->busy |= busy_check_reader(fence);
		}
	}

	if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
		goto retry;

	err = 0;
out:
	rcu_read_unlock();
	return err;
}
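
/*
 * A userspace sketch decoding the above (illustrative only):
 *
 *	struct drm_i915_gem_busy busy = { .handle = handle };
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0) {
 *		unsigned int write_id = busy.busy & 0xffff;
 *		unsigned int read_flags = busy.busy >> 16;
 *		// busy.busy == 0 means the object is idle
 *	}
 */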

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	mutex_unlock(&obj->mm.lock);

out:
	i915_gem_object_put(obj);
	return err;
}
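
/*
 * A userspace sketch of the madvise uABI (illustrative; typically used
 * by buffer caches to mark idle buffers as purgeable):
 *
 *	struct drm_i915_gem_madvise madv = {
 *		.handle = handle,
 *		.madv = I915_MADV_DONTNEED,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	// on re-use, set I915_MADV_WILLNEED and check madv.retained
 */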

static void
frontbuffer_retire(struct i915_gem_active *active,
		   struct drm_i915_gem_request *request)
{
	struct drm_i915_gem_object *obj =
		container_of(active, typeof(*obj), frontbuffer_write);

	intel_fb_obj_flush(obj, ORIGIN_CS);
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	mutex_init(&obj->mm.lock);

	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->lut_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	obj->ops = ops;

	reservation_object_init(&obj->__builtin_resv);
	obj->resv = &obj->__builtin_resv;

	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
	init_request_active(&obj->frontbuffer_write, frontbuffer_retire);

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);

	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,

	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,

	.pwrite = i915_gem_object_pwrite_gtt,
};

static int i915_gem_object_create_shmem(struct drm_device *dev,
					struct drm_gem_object *obj,
					size_t size)
{
	struct drm_i915_private *i915 = to_i915(dev);
	unsigned long flags = VM_NORESERVE;
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	if (i915->mm.gemfs)
		filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
						 flags);
	else
		filp = shmem_file_setup("i915", size, flags);

	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}

struct drm_i915_gem_object *
i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	unsigned int cache_level;
	gfp_t mask;
	int ret;

	/* There is a prevalence of the assumption that we fit the object's
	 * page count inside a 32bit _signed_ variable. Let's document this and
	 * catch if we ever need to fix it. In the meantime, if you do spot
	 * such a local variable, please consider fixing!
	 */
	if (size >> PAGE_SHIFT > INT_MAX)
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);
	GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev_priv))
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		cache_level = I915_CACHE_LLC;
	else
		cache_level = I915_CACHE_NONE;

	i915_gem_object_set_cache_coherency(obj, cache_level);

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}
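
/*
 * A minimal in-kernel usage sketch (illustrative; error handling is the
 * caller's responsibility):
 *
 *	obj = i915_gem_object_create(dev_priv, PAGE_SIZE);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *	...
 *	i915_gem_object_put(obj);
 */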

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	intel_runtime_pm_get(i915);
	llist_for_each_entry_safe(obj, on, freed, freed) {
		struct i915_vma *vma, *vn;

		trace_i915_gem_object_destroy(obj);

		mutex_lock(&i915->drm.struct_mutex);

		GEM_BUG_ON(i915_gem_object_is_active(obj));
		list_for_each_entry_safe(vma, vn,
					 &obj->vma_list, obj_link) {
			GEM_BUG_ON(i915_vma_is_active(vma));
			vma->flags &= ~I915_VMA_PIN_MASK;
			i915_vma_close(vma);
		}
		GEM_BUG_ON(!list_empty(&obj->vma_list));
		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));

		/* This serializes freeing with the shrinker. Since the free
		 * is delayed, first by RCU then by the workqueue, we want the
		 * shrinker to be able to free pages of unreferenced objects,
		 * or else we may oom whilst there are plenty of deferred
		 * freed objects.
		 */
		if (i915_gem_object_has_pages(obj)) {
			spin_lock(&i915->mm.obj_lock);
			list_del_init(&obj->mm.link);
			spin_unlock(&i915->mm.obj_lock);
		}

		mutex_unlock(&i915->drm.struct_mutex);

		GEM_BUG_ON(obj->bind_count);
		GEM_BUG_ON(obj->userfault_count);
		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
		GEM_BUG_ON(!list_empty(&obj->lut_list));

		if (obj->ops->release)
			obj->ops->release(obj);
		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
			atomic_set(&obj->mm.pages_pin_count, 0);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		GEM_BUG_ON(i915_gem_object_has_pages(obj));

		if (obj->base.import_attach)
			drm_prime_gem_destroy(&obj->base, NULL);

		reservation_object_fini(&obj->__builtin_resv);
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(i915, obj->base.size);

		kfree(obj->bit_17);
		i915_gem_object_free(obj);

		if (on)
			cond_resched();
	}
	intel_runtime_pm_put(i915);
}

static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed;

	/* Free the oldest, most stale object to keep the free_list short */
	freed = NULL;
	if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
		/* Only one consumer of llist_del_first() allowed */
		spin_lock(&i915->mm.free_lock);
		freed = llist_del_first(&i915->mm.free_list);
		spin_unlock(&i915->mm.free_lock);
	}
	if (unlikely(freed)) {
		freed->next = NULL;
		__i915_gem_free_objects(i915, freed);
	}
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);
	struct llist_node *freed;
	/* All file-owned VMA should have been released by this point through
	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * be unbound now.
	 */

	spin_lock(&i915->mm.free_lock);
	while ((freed = llist_del_all(&i915->mm.free_list))) {
		spin_unlock(&i915->mm.free_lock);

		__i915_gem_free_objects(i915, freed);
		if (need_resched())
			return;

		spin_lock(&i915->mm.free_lock);
	}
	spin_unlock(&i915->mm.free_lock);
}

static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* We can't simply use call_rcu() from i915_gem_free_object()
	 * as we need to block whilst unbinding, and the call_rcu
	 * task may be called from softirq context. So we take a
	 * detour through a worker.
	 */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		schedule_work(&i915->mm.free_work);
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	if (obj->mm.quirked)
		__i915_gem_object_unpin_pages(obj);

	if (discard_backing_storage(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (!i915_gem_object_has_active_reference(obj) &&
	    i915_gem_object_is_active(obj))
		i915_gem_object_set_active_reference(obj);
	else
		i915_gem_object_put(obj);
}

static void assert_kernel_context_is_current(struct drm_i915_private *i915)
{
	struct i915_gem_context *kernel_context = i915->kernel_context;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, i915, id) {
		GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline->last_request));
		GEM_BUG_ON(engine->last_retired_context != kernel_context);
	}
}

void i915_gem_sanitize(struct drm_i915_private *i915)
{
	if (i915_terminally_wedged(&i915->gpu_error)) {
		mutex_lock(&i915->drm.struct_mutex);
		i915_gem_unset_wedged(i915);
		mutex_unlock(&i915->drm.struct_mutex);
	}

	/*
	 * If we inherit context state from the BIOS or earlier occupants
	 * of the GPU, the GPU may be in an inconsistent state when we
	 * try to take over. The only way to remove the earlier state
	 * is by resetting. However, resetting on earlier gen is tricky as
	 * it may impact the display and we are uncertain about the stability
	 * of the reset, so this could be applied to even earlier gen.
	 */
	if (INTEL_GEN(i915) >= 5) {
		int reset = intel_gpu_reset(i915, ALL_ENGINES);
		WARN_ON(reset && reset != -ENODEV);
	}
}

int i915_gem_suspend(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	intel_runtime_pm_get(dev_priv);
	intel_suspend_gt_powersave(dev_priv);

	mutex_lock(&dev->struct_mutex);

	/* We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the last
	 * context image is coherent, we have to switch away from it. That
	 * leaves the dev_priv->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
		ret = i915_gem_switch_to_kernel_context(dev_priv);
		if (ret)
			goto err_unlock;

		ret = i915_gem_wait_for_idle(dev_priv,
					     I915_WAIT_INTERRUPTIBLE |
					     I915_WAIT_LOCKED);
		if (ret && ret != -EIO)
			goto err_unlock;

		assert_kernel_context_is_current(dev_priv);
	}
	i915_gem_contexts_lost(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	intel_guc_suspend(dev_priv);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);

	/* As the idle_work is rearming if it detects a race, play safe and
	 * repeat the flush until it is definitely idle.
	 */
	drain_delayed_work(&dev_priv->gt.idle_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->gt.awake);
	if (WARN_ON(!intel_engines_are_idle(dev_priv)))
		i915_gem_set_wedged(dev_priv); /* no hope, discard everything */

	/*
	 * Neither the BIOS, ourselves or any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */
	i915_gem_sanitize(dev_priv);

	intel_runtime_pm_put(dev_priv);
	return 0;

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	intel_runtime_pm_put(dev_priv);
	return ret;
}

void i915_gem_resume(struct drm_i915_private *i915)
{
	WARN_ON(i915->gt.awake);

	mutex_lock(&i915->drm.struct_mutex);
	intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);

	i915_gem_restore_gtt_mappings(i915);
	i915_gem_restore_fences(i915);

	/*
	 * As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	i915->gt.resume(i915);

	if (i915_gem_init_hw(i915))
		goto err_wedged;

	intel_guc_resume(i915);

	/* Always reload a context for powersaving. */
	if (i915_gem_switch_to_kernel_context(i915))
		goto err_wedged;

out_unlock:
	intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
	mutex_unlock(&i915->drm.struct_mutex);
	return;

err_wedged:
	if (!i915_terminally_wedged(&i915->gpu_error)) {
		DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
		i915_gem_set_wedged(i915);
	}
	goto out_unlock;
}

void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev_priv))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev_priv))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev_priv))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev_priv))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}

static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
{
	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
		init_unused_ring(dev_priv, SRB2_BASE);
		init_unused_ring(dev_priv, SRB3_BASE);
	} else if (IS_GEN2(dev_priv)) {
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
	} else if (IS_GEN3(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, PRB2_BASE);
	}
}

static int __i915_gem_restart_engines(void *data)
{
	struct drm_i915_private *i915 = data;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	for_each_engine(engine, i915, id) {
		err = engine->init_hw(engine);
		if (err)
			return err;
	}

	return 0;
}

int i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
	int ret;

	dev_priv->gt.last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev_priv))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev_priv)) {
		if (IS_IVYBRIDGE(dev_priv)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_GEN(dev_priv) >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev_priv);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev_priv);

	BUG_ON(!dev_priv->kernel_context);
	if (i915_terminally_wedged(&dev_priv->gpu_error)) {
		ret = -EIO;
		goto out;
	}

	ret = i915_ppgtt_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(dev_priv);
	if (ret)
		goto out;

	intel_mocs_init_l3cc_table(dev_priv);

	/* Only when the HW is re-initialised, can we replay the requests */
	ret = __i915_gem_restart_engines(dev_priv);
out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
	struct i915_gem_context *ctx;
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err;

	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its defaults values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	ctx = i915_gem_context_create_kernel(i915, 0);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	for_each_engine(engine, i915, id) {
		struct drm_i915_gem_request *rq;

		rq = i915_gem_request_alloc(engine, ctx);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto out_ctx;
		}

		err = 0;
		if (engine->init_context)
			err = engine->init_context(rq);

		__i915_add_request(rq, true);
		if (err)
			goto err_active;
	}

	err = i915_gem_switch_to_kernel_context(i915);
	if (err)
		goto err_active;

	err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
	if (err)
		goto err_active;

	assert_kernel_context_is_current(i915);

	for_each_engine(engine, i915, id) {
		struct i915_vma *state;

		state = ctx->engine[id].state;
		if (!state)
			continue;

		/*
		 * As we will hold a reference to the logical state, it will
		 * not be torn down with the context, and importantly the
		 * object will hold onto its vma (making it possible for a
		 * stray GTT write to corrupt our defaults). Unmap the vma
		 * from the GTT to prevent such accidents and reclaim the
		 * space.
		 */
		err = i915_vma_unbind(state);
		if (err)
			goto err_active;

		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
		if (err)
			goto err_active;

		engine->default_state = i915_gem_object_get(state->obj);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		unsigned int found = intel_engines_has_context_isolation(i915);

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		for_each_engine(engine, i915, id) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((found & bit) != expected) {
				DRM_ERROR("mismatching default context state for class %d on engine %s\n",
					  engine->uabi_class, engine->name);
			}
		}
	}

out_ctx:
	i915_gem_context_set_closed(ctx);
	i915_gem_context_put(ctx);
	return err;

err_active:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. First try to flush any remaining
	 * request, ensure we are pointing at the kernel context and
	 * then remove it.
	 */
	if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
		goto out_ctx;

	if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
		goto out_ctx;

	i915_gem_contexts_lost(i915);
	goto out_ctx;
}

int i915_gem_init(struct drm_i915_private *dev_priv)
{
	int ret;

	/*
	 * We need to fall back to 4K pages since gvt gtt handling doesn't
	 * support huge page entries - we will need to check either hypervisor
	 * mm can support huge guest page or just do emulation in gvt.
	 */
	if (intel_vgpu_active(dev_priv))
		mkwrite_device_info(dev_priv)->page_sizes =
			I915_GTT_PAGE_SIZE_4K;

5152
	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
5153

5154
	if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
5155
		dev_priv->gt.resume = intel_lr_context_resume;
5156
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
5157 5158 5159
	} else {
		dev_priv->gt.resume = intel_legacy_submission_resume;
		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
5160 5161
	}

5162 5163 5164 5165
	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		return ret;

5166 5167 5168 5169 5170 5171
	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
5172
	mutex_lock(&dev_priv->drm.struct_mutex);
5173 5174
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

5175
	ret = i915_gem_init_ggtt(dev_priv);
5176 5177 5178 5179
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}
5180

5181
	ret = i915_gem_contexts_init(dev_priv);
5182 5183 5184 5185
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_ggtt;
	}
5186

5187
	ret = intel_engines_init(dev_priv);
5188 5189 5190 5191
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_context;
	}
5192

5193 5194
	intel_init_gt_powersave(dev_priv);

5195
	ret = i915_gem_init_hw(dev_priv);
5196
	if (ret)
5197
		goto err_pm;
5198 5199 5200 5201 5202 5203 5204 5205 5206 5207 5208 5209

	/*
	 * Despite its name, intel_init_clock_gating applies both display
	 * clock gating workarounds and GT mmio workarounds, plus the
	 * occasional GT power context workaround. Worse, sometimes it
	 * includes a context register workaround which we need to apply
	 * before we record the default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_init_clock_gating(dev_priv);

	ret = __intel_engines_record_defaults(dev_priv);
	if (ret)
		goto err_init_hw;

	if (i915_inject_load_failure()) {
		/* Fault injection: exercise the full (-ENODEV) unwind path */
		ret = -ENODEV;
		goto err_init_hw;
	}

	if (i915_inject_load_failure()) {
		/* Fault injection: exercise the wedged (-EIO) unwind path */
		ret = -EIO;
		goto err_init_hw;
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;

	/*
	 * Unwinding is complicated by the fact that we want to handle -EIO
	 * to mean disable GPU submission but keep KMS alive. We want to mark
	 * the HW as irreversibly wedged, but keep enough state around that
	 * the driver doesn't explode during runtime.
	 */
err_init_hw:
	i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
	i915_gem_contexts_lost(dev_priv);
	intel_uc_fini_hw(dev_priv);
err_pm:
	if (ret != -EIO) {
		intel_cleanup_gt_powersave(dev_priv);
		i915_gem_cleanup_engines(dev_priv);
	}
err_context:
	if (ret != -EIO)
		i915_gem_contexts_fini(dev_priv);
err_ggtt:
err_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	if (ret != -EIO)
		i915_gem_cleanup_userptr(dev_priv);

	if (ret == -EIO) {
		/*
		 * Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry;
		 * for all other failures, such as an allocation failure, bail.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
			i915_gem_set_wedged(dev_priv);
		}
		ret = 0;
	}

	i915_gem_drain_freed_objects(dev_priv);
	return ret;
}

void i915_gem_init_mmio(struct drm_i915_private *i915)
{
	i915_gem_sanitize(i915);
}

void
i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		dev_priv->gt.cleanup_engine(engine);
}

void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	int i;

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev_priv)->gen >= 4 ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

		fence->i915 = dev_priv;
		fence->id = i;
		list_add_tail(&fence->link, &dev_priv->mm.fence_list);
	}
	i915_gem_restore_fences(dev_priv);

	i915_gem_detect_bit_6_swizzle(dev_priv);
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mm.object_stat_lock);
	spin_lock_init(&i915->mm.obj_lock);
	spin_lock_init(&i915->mm.free_lock);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.unbound_list);
	INIT_LIST_HEAD(&i915->mm.bound_list);
	INIT_LIST_HEAD(&i915->mm.fence_list);
	INIT_LIST_HEAD(&i915->mm.userfault_list);

	INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
}

int
i915_gem_load_init(struct drm_i915_private *dev_priv)
{
	int err = -ENOMEM;

	dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->objects)
		goto err_out;

	dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->vmas)
		goto err_objects;

	dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
	if (!dev_priv->luts)
		goto err_vmas;

	dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
					SLAB_HWCACHE_ALIGN |
					SLAB_RECLAIM_ACCOUNT |
					SLAB_TYPESAFE_BY_RCU);
	if (!dev_priv->requests)
		goto err_luts;

	dev_priv->dependencies = KMEM_CACHE(i915_dependency,
					    SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT);
	if (!dev_priv->dependencies)
		goto err_requests;

	dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->priorities)
		goto err_dependencies;

	mutex_lock(&dev_priv->drm.struct_mutex);
	INIT_LIST_HEAD(&dev_priv->gt.timelines);
	err = i915_gem_timeline_init__global(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	if (err)
		goto err_priorities;

	i915_gem_init__mm(dev_priv);

	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

	spin_lock_init(&dev_priv->fb_tracking.lock);

	err = i915_gemfs_init(dev_priv);
	if (err)
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);

	return 0;

err_priorities:
	kmem_cache_destroy(dev_priv->priorities);
err_dependencies:
	kmem_cache_destroy(dev_priv->dependencies);
err_requests:
	kmem_cache_destroy(dev_priv->requests);
err_luts:
	kmem_cache_destroy(dev_priv->luts);
err_vmas:
	kmem_cache_destroy(dev_priv->vmas);
err_objects:
	kmem_cache_destroy(dev_priv->objects);
err_out:
	return err;
}

void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_freed_objects(dev_priv);
	WARN_ON(!llist_empty(&dev_priv->mm.free_list));
	WARN_ON(dev_priv->mm.object_count);

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
	WARN_ON(!list_empty(&dev_priv->gt.timelines));
	mutex_unlock(&dev_priv->drm.struct_mutex);

	kmem_cache_destroy(dev_priv->priorities);
	kmem_cache_destroy(dev_priv->dependencies);
	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->luts);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();

	i915_gemfs_fini(dev_priv);
}

int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	/* Discard all purgeable objects, let userspace recover those as
	 * required after resuming.
	 */
	i915_gem_shrink_all(dev_priv);

	return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&dev_priv->mm.unbound_list,
		&dev_priv->mm.bound_list,
		NULL
	}, **p;

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well, see i915_gem_freeze()
	 */

	i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
	i915_gem_drain_freed_objects(dev_priv);

	spin_lock(&dev_priv->mm.obj_lock);
	for (p = phases; *p; p++) {
		list_for_each_entry(obj, *p, mm.link)
			__start_cpu_write(obj);
	}
	spin_unlock(&dev_priv->mm.obj_lock);

	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_link)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = i915;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask is guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     sizeof(atomic_t) * BITS_PER_BYTE);

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}
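
/*
 * Usage sketch (illustrative only; the real call sites live in the plane
 * update paths of the display code): when a plane flips between two
 * framebuffers, the tracking bits move from the old object to the new one:
 *
 *	i915_gem_track_fb(old_obj, new_obj, frontbuffer_bits);
 *
 * with old_obj/new_obj the backing GEM objects of the two framebuffers,
 * either of which may be NULL when a plane is being enabled or disabled.
 */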

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
				 const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct file *file;
	size_t offset;
	int err;

	obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);

	file = obj->base.filp;
	offset = 0;
	do {
		unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
		struct page *page;
		void *pgdata, *vaddr;

		err = pagecache_write_begin(file, file->f_mapping,
					    offset, len, 0,
					    &page, &pgdata);
		if (err < 0)
			goto fail;

		vaddr = kmap(page);
		memcpy(vaddr, data, len);
		kunmap(page);

		err = pagecache_write_end(file, file->f_mapping,
					  offset, len, len,
					  page, pgdata);
		if (err < 0)
			goto fail;

		size -= len;
		data += len;
		offset += len;
	} while (size);

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(err);
}
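
/*
 * Usage sketch (illustrative only; blob/blob_size are hypothetical names):
 * callers use this helper to wrap a driver-owned buffer in a GEM object:
 *
 *	obj = i915_gem_object_create_from_data(dev_priv, blob, blob_size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 * The backing store is rounded up to a whole number of pages and, as
 * asserted above, starts in the CPU write domain.
 */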

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		unsigned long exception, i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		exception =
			RADIX_TREE_EXCEPTIONAL_ENTRY |
			idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i,
						(void *)exception);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radixtree will contain an exceptional entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(radix_tree_exception(sg))) {
		unsigned long base =
			(unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}
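
/*
 * Worked example of the lookup scheme above (illustrative): suppose a
 * single sg entry covers pages [4, 8). Index 4 stores the sg pointer
 * itself, while indices 5-7 store exceptional entries encoding base
 * index 4. A lookup of page 6 therefore finds an exceptional entry,
 * re-looks-up the base sg at index 4 and returns it with *offset = 2.
 */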

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	/*
	 * If the object as a whole is tracked as dirty, every page will be
	 * flagged dirty when its backing store is released; otherwise mark
	 * just this page dirty now.
	 */
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}
}

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	struct sg_table *pages;
	int err;

	if (align > obj->base.size)
		return -EINVAL;

	if (obj->ops == &i915_gem_phys_ops)
		return 0;

	if (obj->ops != &i915_gem_object_ops)
		return -EINVAL;

	err = i915_gem_object_unbind(obj);
	if (err)
		return err;

	mutex_lock(&obj->mm.lock);

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.quirked) {
		err = -EFAULT;
		goto err_unlock;
	}

	if (obj->mm.mapping) {
		err = -EBUSY;
		goto err_unlock;
	}

	pages = fetch_and_zero(&obj->mm.pages);
	if (pages) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);

		__i915_gem_object_reset_page_iter(obj);

		spin_lock(&i915->mm.obj_lock);
		list_del(&obj->mm.link);
		spin_unlock(&i915->mm.obj_lock);
	}

	obj->ops = &i915_gem_phys_ops;

	err = ____i915_gem_object_get_pages(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_object_ops.put_pages(obj, pages);
	mutex_unlock(&obj->mm.lock);
	return 0;

err_xfer:
	obj->ops = &i915_gem_object_ops;
	obj->mm.pages = pages;
err_unlock:
	mutex_unlock(&obj->mm.lock);
	return err;
}
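
/*
 * Usage sketch (illustrative only): hardware paths that need physically
 * contiguous backing switch the object over before pinning its pages:
 *
 *	err = i915_gem_object_attach_phys(obj, align);
 *	if (err)
 *		return err;
 *
 * where align is the required physical alignment in bytes and must not
 * exceed the object size (see the -EINVAL check above).
 */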

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/scatterlist.c"
#include "selftests/mock_gem_device.c"
#include "selftests/huge_gem_object.c"
#include "selftests/huge_pages.c"
#include "selftests/i915_gem_object.c"
#include "selftests/i915_gem_coherency.c"
#endif