i915_gem.c 135.8 KB
Newer Older
1
/*
2
 * Copyright © 2008-2015 Intel Corporation
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

28
#include <drm/drmP.h>
29
#include <drm/drm_vma_manager.h>
30
#include <drm/i915_drm.h>
31
#include "i915_drv.h"
32
#include "i915_vgpu.h"
C
Chris Wilson 已提交
33
#include "i915_trace.h"
34
#include "intel_drv.h"
35
#include "intel_frontbuffer.h"
36
#include "intel_mocs.h"
37
#include <linux/reservation.h>
38
#include <linux/shmem_fs.h>
39
#include <linux/slab.h>
40
#include <linux/swap.h>
J
Jesse Barnes 已提交
41
#include <linux/pci.h>
42
#include <linux/dma-buf.h>
43

44
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
45
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
46
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
47

48 49 50 51 52 53
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(dev) || level != I915_CACHE_NONE;
}

54 55
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
56 57 58
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

59 60 61 62 63 64
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

65
static int
66
insert_mappable_node(struct i915_ggtt *ggtt,
67 68 69
                     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
70 71 72
	return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
						   size, 0, -1,
						   0, ggtt->mappable_end,
73 74 75 76 77 78 79 80 81 82
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

83 84
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
85
				  u64 size)
86
{
87
	spin_lock(&dev_priv->mm.object_stat_lock);
88 89
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
90
	spin_unlock(&dev_priv->mm.object_stat_lock);
91 92 93
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
94
				     u64 size)
95
{
96
	spin_lock(&dev_priv->mm.object_stat_lock);
97 98
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
99
	spin_unlock(&dev_priv->mm.object_stat_lock);
100 101
}

102
static int
103
i915_gem_wait_for_error(struct i915_gpu_error *error)
104 105 106
{
	int ret;

107 108
	might_sleep();

109
	if (!i915_reset_in_progress(error))
110 111
		return 0;

112 113 114 115 116
	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
117
	ret = wait_event_interruptible_timeout(error->reset_queue,
118
					       !i915_reset_in_progress(error),
119
					       I915_RESET_TIMEOUT);
120 121 122 123
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
124
		return ret;
125 126
	} else {
		return 0;
127
	}
128 129
}

130
int i915_mutex_lock_interruptible(struct drm_device *dev)
131
{
132
	struct drm_i915_private *dev_priv = to_i915(dev);
133 134
	int ret;

135
	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
136 137 138 139 140 141 142 143 144
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}
145

146 147
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
148
			    struct drm_file *file)
149
{
150
	struct drm_i915_private *dev_priv = to_i915(dev);
151
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
152
	struct drm_i915_gem_get_aperture *args = data;
153
	struct i915_vma *vma;
154
	size_t pinned;
155

156
	pinned = 0;
157
	mutex_lock(&dev->struct_mutex);
158
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
159
		if (i915_vma_is_pinned(vma))
160
			pinned += vma->node.size;
161
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
162
		if (i915_vma_is_pinned(vma))
163
			pinned += vma->node.size;
164
	mutex_unlock(&dev->struct_mutex);
165

166
	args->aper_size = ggtt->base.total;
167
	args->aper_available_size = args->aper_size - pinned;
168

169 170 171
	return 0;
}

172
static struct sg_table *
173
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
174
{
175
	struct address_space *mapping = obj->base.filp->f_mapping;
176 177 178 179
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;
180

181
	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
182
		return ERR_PTR(-EINVAL);
183 184 185 186 187 188 189

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
190
			return ERR_CAST(page);
191 192 193 194 195 196

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

197
		put_page(page);
198 199 200
		vaddr += PAGE_SIZE;
	}

201
	i915_gem_chipset_flush(to_i915(obj->base.dev));
202 203 204

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
205
		return ERR_PTR(-ENOMEM);
206 207 208

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
209
		return ERR_PTR(-ENOMEM);
210 211 212 213 214
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;
215

216 217 218
	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

219
	return st;
220 221 222
}

static void
223
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj)
224
{
C
Chris Wilson 已提交
225
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
226

C
Chris Wilson 已提交
227 228
	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;
229

230 231 232 233 234 235 236 237 238 239 240 241 242
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
		i915_gem_clflush_object(obj, false);

	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj);

C
Chris Wilson 已提交
243
	if (obj->mm.dirty) {
244
		struct address_space *mapping = obj->base.filp->f_mapping;
245
		char *vaddr = obj->phys_handle->vaddr;
246 247 248
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
249 250 251 252 253 254 255 256 257 258 259 260 261
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
C
Chris Wilson 已提交
262
			if (obj->mm.madv == I915_MADV_WILLNEED)
263
				mark_page_accessed(page);
264
			put_page(page);
265 266
			vaddr += PAGE_SIZE;
		}
C
Chris Wilson 已提交
267
		obj->mm.dirty = false;
268 269
	}

270 271
	sg_free_table(pages);
	kfree(pages);
272 273 274 275 276 277
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
C
Chris Wilson 已提交
278
	i915_gem_object_unpin_pages(obj);
279 280 281 282 283 284 285 286
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

287
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
288 289 290
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
291 292 293
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
294

295 296 297 298
	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
299
	 */
300 301 302 303 304 305
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
306 307 308 309 310
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

311 312 313 314 315 316 317 318 319 320 321 322 323
	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

324 325 326 327 328
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout,
			   struct intel_rps_client *rps)
329
{
330
	struct drm_i915_gem_request *rq;
331

332
	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
333

334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
	if (i915_gem_request_completed(rq))
		goto out;

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (rps) {
		if (INTEL_GEN(rq->i915) >= 6)
			gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
		else
			rps = NULL;
366 367
	}

368 369 370 371 372 373
	timeout = i915_wait_request(rq, flags, timeout);

out:
	if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
		i915_gem_request_retire_upto(rq);

374
	if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403
		/* The GPU is now idle and this client has stalled.
		 * Since no other client has submitted a request in the
		 * meantime, assume that this client is the only one
		 * supplying work to the GPU but is unable to keep that
		 * work supplied because it is waiting. Since the GPU is
		 * then never kept fully busy, RPS autoclocking will
		 * keep the clocks relatively low, causing further delays.
		 * Compensate by giving the synchronous client credit for
		 * a waitboost next time.
		 */
		spin_lock(&rq->i915->rps.client_lock);
		list_del_init(&rps->link);
		spin_unlock(&rq->i915->rps.client_lock);
	}

	return timeout;
}

static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout,
				 struct intel_rps_client *rps)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
404 405
		int ret;

406 407
		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
408 409 410
		if (ret)
			return ret;

411 412 413 414 415 416
		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout,
							     rps);
			if (timeout <= 0)
				break;
417

418 419 420 421 422 423 424 425
			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(resv);
426 427
	}

428 429 430 431 432 433
	if (excl && timeout > 0)
		timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);

	dma_fence_put(excl);

	return timeout;
434 435
}

436 437 438 439 440 441
/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps: client (user process) to charge for any waitboosting
442
 */
443 444 445 446 447
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout,
		     struct intel_rps_client *rps)
448
{
449 450 451 452 453 454 455
	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);
456

457 458 459
	timeout = i915_gem_object_wait_reservation(obj->resv,
						   flags, timeout,
						   rps);
460
	return timeout < 0 ? timeout : 0;
461 462 463 464 465 466 467 468 469
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}

470 471 472 473 474
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
475
	int ret;
476 477 478 479 480 481 482 483

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
			return -EBUSY;

		return 0;
	}

C
Chris Wilson 已提交
484
	if (obj->mm.madv != I915_MADV_WILLNEED)
485 486 487 488 489
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

C
Chris Wilson 已提交
490 491 492 493
	ret = i915_gem_object_unbind(obj);
	if (ret)
		return ret;

494
	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
495 496
	if (obj->mm.pages)
		return -EBUSY;
497

498 499 500 501 502 503
	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
504 505
	obj->ops = &i915_gem_phys_ops;

C
Chris Wilson 已提交
506
	return i915_gem_object_pin_pages(obj);
507 508 509 510 511
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
512
		     struct drm_file *file)
513 514 515
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
516
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
517
	int ret;
518 519 520 521

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
522 523 524 525 526 527
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
528
				   to_rps_client(file));
529 530
	if (ret)
		return ret;
531

532
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
533 534 535 536 537 538 539 540 541 542
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
543 544 545 546
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
547 548
	}

549
	drm_clflush_virt_range(vaddr, args->size);
550
	i915_gem_chipset_flush(to_i915(dev));
551 552

out:
553
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
554
	return ret;
555 556
}

557 558
void *i915_gem_object_alloc(struct drm_device *dev)
{
559
	struct drm_i915_private *dev_priv = to_i915(dev);
560
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
561 562 563 564
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
565
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
566
	kmem_cache_free(dev_priv->objects, obj);
567 568
}

569 570 571 572 573
static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
574
{
575
	struct drm_i915_gem_object *obj;
576 577
	int ret;
	u32 handle;
578

579
	size = roundup(size, PAGE_SIZE);
580 581
	if (size == 0)
		return -EINVAL;
582 583

	/* Allocate the new object */
584
	obj = i915_gem_object_create(dev, size);
585 586
	if (IS_ERR(obj))
		return PTR_ERR(obj);
587

588
	ret = drm_gem_handle_create(file, &obj->base, &handle);
589
	/* drop reference from allocate - handle holds it now */
C
Chris Wilson 已提交
590
	i915_gem_object_put(obj);
591 592
	if (ret)
		return ret;
593

594
	*handle_p = handle;
595 596 597
	return 0;
}

598 599 600 601 602 603
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
604
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
605 606
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
607
			       args->size, &args->handle);
608 609 610 611
}

/**
 * Creates a new mm object and returns a handle to it.
612 613 614
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
615 616 617 618 619 620
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;
621

622 623
	i915_gem_flush_free_objects(to_i915(dev));

624
	return i915_gem_create(file, dev,
625
			       args->size, &args->handle);
626 627
}

628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

654
static inline int
655 656
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

680 681 682 683 684 685
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
686
				    unsigned int *needs_clflush)
687 688 689
{
	int ret;

690
	lockdep_assert_held(&obj->base.dev->struct_mutex);
691

692
	*needs_clflush = 0;
693 694
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;
695

696 697 698 699 700
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
701 702 703
	if (ret)
		return ret;

C
Chris Wilson 已提交
704
	ret = i915_gem_object_pin_pages(obj);
705 706 707
	if (ret)
		return ret;

708 709
	i915_gem_object_flush_gtt_write_domain(obj);

710 711 712 713 714 715
	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
716 717
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
718 719 720

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
721 722 723
		if (ret)
			goto err_unpin;

724
		*needs_clflush = 0;
725 726
	}

727
	/* return with the pages pinned */
728
	return 0;
729 730 731 732

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
733 734 735 736 737 738 739
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

740 741
	lockdep_assert_held(&obj->base.dev->struct_mutex);

742 743 744 745
	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

746 747 748 749 750 751
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
752 753 754
	if (ret)
		return ret;

C
Chris Wilson 已提交
755
	ret = i915_gem_object_pin_pages(obj);
756 757 758
	if (ret)
		return ret;

759 760
	i915_gem_object_flush_gtt_write_domain(obj);

761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777
	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;

	/* Same trick applies to invalidate partially written cachelines read
	 * before writing.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
							 obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
778 779 780
		if (ret)
			goto err_unpin;

781 782 783 784 785 786 787
		*needs_clflush = 0;
	}

	if ((*needs_clflush & CLFLUSH_AFTER) == 0)
		obj->cache_dirty = true;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
C
Chris Wilson 已提交
788
	obj->mm.dirty = true;
789
	/* return with the pages pinned */
790
	return 0;
791 792 793 794

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
795 796
}

797 798 799 800
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
801
	if (unlikely(swizzled)) {
802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

819 820 821
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
822
shmem_pread_slow(struct page *page, int offset, int length,
823 824 825 826 827 828 829 830
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
831
		shmem_clflush_swizzled_range(vaddr + offset, length,
832
					     page_do_bit17_swizzling);
833 834

	if (page_do_bit17_swizzling)
835
		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
836
	else
837
		ret = __copy_to_user(user_data, vaddr + offset, length);
838 839
	kunmap(page);

840
	return ret ? - EFAULT : 0;
841 842
}

843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918
static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
	    bool page_do_bit17_swizzling, bool needs_clflush)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush)
			drm_clflush_virt_range(vaddr + offset, length);
		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return 0;

	return shmem_pread_slow(page, offset, length, user_data,
				page_do_bit17_swizzling, needs_clflush);
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	char __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	mutex_unlock(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pread(page, offset, length, user_data,
				  page_to_phys(page) & obj_do_bit17_swizzling,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
919 920
{
	void *vaddr;
921
	unsigned long unwritten;
922 923

	/* We can use the cpu mem copy function because this is X86. */
924 925 926 927 928 929 930 931 932
	vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = (void __force *)
			io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data, vaddr + offset, length);
		io_mapping_unmap(vaddr);
	}
933 934 935 936
	return unwritten;
}

static int
937 938
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
939
{
940 941
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
942
	struct drm_mm_node node;
943 944 945
	struct i915_vma *vma;
	void __user *user_data;
	u64 remain, offset;
946 947
	int ret;

948 949 950 951 952 953 954
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
955 956 957
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
958
		ret = i915_vma_put_fence(vma);
959 960 961 962 963
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
C
Chris Wilson 已提交
964
	if (IS_ERR(vma)) {
965
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
966
		if (ret)
967 968
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
969 970 971 972 973 974
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

975
	mutex_unlock(&i915->drm.struct_mutex);
976

977 978 979
	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;
980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
996
					       node.start, I915_CACHE_NONE, 0);
997 998 999 1000
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}
1001 1002 1003

		if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
				  user_data, page_length)) {
1004 1005 1006 1007 1008 1009 1010 1011 1012
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

1013
	mutex_lock(&i915->drm.struct_mutex);
1014 1015 1016 1017
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
1018
				       node.start, node.size);
1019 1020
		remove_mappable_node(&node);
	} else {
C
Chris Wilson 已提交
1021
		i915_vma_unpin(vma);
1022
	}
1023 1024 1025
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
1026

1027 1028 1029
	return ret;
}

1030 1031
/**
 * Reads data from the object referenced by handle.
1032 1033 1034
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
1035 1036 1037 1038 1039
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1040
		     struct drm_file *file)
1041 1042
{
	struct drm_i915_gem_pread *args = data;
1043
	struct drm_i915_gem_object *obj;
1044
	int ret;
1045

1046 1047 1048 1049
	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
1050
		       u64_to_user_ptr(args->data_ptr),
1051 1052 1053
		       args->size))
		return -EFAULT;

1054
	obj = i915_gem_object_lookup(file, args->handle);
1055 1056
	if (!obj)
		return -ENOENT;
1057

1058
	/* Bounds check source.  */
1059 1060
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
C
Chris Wilson 已提交
1061
		ret = -EINVAL;
1062
		goto out;
C
Chris Wilson 已提交
1063 1064
	}

C
Chris Wilson 已提交
1065 1066
	trace_i915_gem_object_pread(obj, args->offset, args->size);

1067 1068 1069 1070
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1071
	if (ret)
1072
		goto out;
1073

1074
	ret = i915_gem_object_pin_pages(obj);
1075
	if (ret)
1076
		goto out;
1077

1078
	ret = i915_gem_shmem_pread(obj, args);
1079
	if (ret == -EFAULT || ret == -ENODEV)
1080
		ret = i915_gem_gtt_pread(obj, args);
1081

1082 1083
	i915_gem_object_unpin_pages(obj);
out:
C
Chris Wilson 已提交
1084
	i915_gem_object_put(obj);
1085
	return ret;
1086 1087
}

1088 1089
/* This is the fast write path which cannot handle
 * page faults in the source data
1090
 */
1091

1092 1093 1094 1095
static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
1096
{
1097
	void *vaddr;
1098
	unsigned long unwritten;
1099

1100
	/* We can use the cpu mem copy function because this is X86. */
1101 1102
	vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
1103
						      user_data, length);
1104 1105 1106 1107 1108 1109 1110
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = (void __force *)
			io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user(vaddr + offset, user_data, length);
		io_mapping_unmap(vaddr);
	}
1111 1112 1113 1114

	return unwritten;
}

1115 1116 1117
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
1118
 * @obj: i915 GEM object
1119
 * @args: pwrite arguments structure
1120
 */
1121
static int
1122 1123
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
1124
{
1125
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
1126 1127
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
1128 1129 1130
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
1131
	int ret;
1132

1133 1134 1135
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;
D
Daniel Vetter 已提交
1136

1137
	intel_runtime_pm_get(i915);
C
Chris Wilson 已提交
1138
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1139
				       PIN_MAPPABLE | PIN_NONBLOCK);
1140 1141 1142
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
1143
		ret = i915_vma_put_fence(vma);
1144 1145 1146 1147 1148
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
C
Chris Wilson 已提交
1149
	if (IS_ERR(vma)) {
1150
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1151
		if (ret)
1152 1153
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
1154
	}
D
Daniel Vetter 已提交
1155 1156 1157 1158 1159

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

1160 1161
	mutex_unlock(&i915->drm.struct_mutex);

1162
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1163

1164 1165 1166 1167
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
1168 1169
		/* Operation in this page
		 *
1170 1171 1172
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
1173
		 */
1174
		u32 page_base = node.start;
1175 1176
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
1177 1178 1179 1180 1181 1182 1183 1184 1185 1186
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
1187
		/* If we get a fault while copying data, then (presumably) our
1188 1189
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
1190 1191
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
1192
		 */
1193 1194 1195 1196
		if (ggtt_write(&ggtt->mappable, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
D
Daniel Vetter 已提交
1197
		}
1198

1199 1200 1201
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
1202
	}
1203
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1204 1205

	mutex_lock(&i915->drm.struct_mutex);
D
Daniel Vetter 已提交
1206
out_unpin:
1207 1208 1209
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
1210
				       node.start, node.size);
1211 1212
		remove_mappable_node(&node);
	} else {
C
Chris Wilson 已提交
1213
		i915_vma_unpin(vma);
1214
	}
1215
out_unlock:
1216
	intel_runtime_pm_put(i915);
1217
	mutex_unlock(&i915->drm.struct_mutex);
1218
	return ret;
1219 1220
}

1221
static int
1222
shmem_pwrite_slow(struct page *page, int offset, int length,
1223 1224 1225 1226
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
1227
{
1228 1229
	char *vaddr;
	int ret;
1230

1231
	vaddr = kmap(page);
1232
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1233
		shmem_clflush_swizzled_range(vaddr + offset, length,
1234
					     page_do_bit17_swizzling);
1235
	if (page_do_bit17_swizzling)
1236 1237
		ret = __copy_from_user_swizzled(vaddr, offset, user_data,
						length);
1238
	else
1239
		ret = __copy_from_user(vaddr + offset, user_data, length);
1240
	if (needs_clflush_after)
1241
		shmem_clflush_swizzled_range(vaddr + offset, length,
1242
					     page_do_bit17_swizzling);
1243
	kunmap(page);
1244

1245
	return ret ? -EFAULT : 0;
1246 1247
}

1248 1249 1250 1251 1252
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
1253
static int
1254 1255 1256 1257
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool page_do_bit17_swizzling,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
1258
{
1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush_before)
			drm_clflush_virt_range(vaddr + offset, len);
		ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
		if (needs_clflush_after)
			drm_clflush_virt_range(vaddr + offset, len);

		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return ret;

	return shmem_pwrite_slow(page, offset, len, user_data,
				 page_do_bit17_swizzling,
				 needs_clflush_before,
				 needs_clflush_after);
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	void __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int partial_cacheline_write;
1291
	unsigned int needs_clflush;
1292 1293
	unsigned int offset, idx;
	int ret;
1294

1295
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1296 1297 1298
	if (ret)
		return ret;

1299 1300 1301 1302
	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	mutex_unlock(&i915->drm.struct_mutex);
	if (ret)
		return ret;
1303

1304 1305 1306
	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);
1307

1308 1309 1310 1311 1312 1313 1314
	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1315

1316 1317 1318 1319 1320 1321
	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;
1322

1323 1324 1325
		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;
1326

1327 1328 1329 1330
		ret = shmem_pwrite(page, offset, length, user_data,
				   page_to_phys(page) & obj_do_bit17_swizzling,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
1331
		if (ret)
1332
			break;
1333

1334 1335 1336
		remain -= length;
		user_data += length;
		offset = 0;
1337
	}
1338

1339
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1340
	i915_gem_obj_finish_shmem_access(obj);
1341
	return ret;
1342 1343 1344 1345
}

/**
 * Writes data to the object referenced by handle.
1346 1347 1348
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1349 1350 1351 1352 1353
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1354
		      struct drm_file *file)
1355 1356
{
	struct drm_i915_gem_pwrite *args = data;
1357
	struct drm_i915_gem_object *obj;
1358 1359 1360 1361 1362 1363
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
1364
		       u64_to_user_ptr(args->data_ptr),
1365 1366 1367
		       args->size))
		return -EFAULT;

1368
	obj = i915_gem_object_lookup(file, args->handle);
1369 1370
	if (!obj)
		return -ENOENT;
1371

1372
	/* Bounds check destination. */
1373 1374
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
C
Chris Wilson 已提交
1375
		ret = -EINVAL;
1376
		goto err;
C
Chris Wilson 已提交
1377 1378
	}

C
Chris Wilson 已提交
1379 1380
	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

1381 1382 1383 1384 1385
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1386 1387 1388
	if (ret)
		goto err;

1389
	ret = i915_gem_object_pin_pages(obj);
1390
	if (ret)
1391
		goto err;
1392

D
Daniel Vetter 已提交
1393
	ret = -EFAULT;
1394 1395 1396 1397 1398 1399
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
1400
	if (!i915_gem_object_has_struct_page(obj) ||
1401
	    cpu_write_needs_clflush(obj))
D
Daniel Vetter 已提交
1402 1403
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
1404 1405
		 * textures). Fallback to the shmem path in that case.
		 */
1406
		ret = i915_gem_gtt_pwrite_fast(obj, args);
1407

1408
	if (ret == -EFAULT || ret == -ENOSPC) {
1409 1410
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
1411
		else
1412
			ret = i915_gem_shmem_pwrite(obj, args);
1413
	}
1414

1415
	i915_gem_object_unpin_pages(obj);
1416
err:
C
Chris Wilson 已提交
1417
	i915_gem_object_put(obj);
1418
	return ret;
1419 1420
}

1421
static inline enum fb_op_origin
1422 1423
write_origin(struct drm_i915_gem_object *obj, unsigned domain)
{
1424 1425
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
1426 1427
}

1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915;
	struct list_head *list;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!i915_vma_is_ggtt(vma))
			continue;

		if (i915_vma_is_active(vma))
			continue;

		if (!drm_mm_node_allocated(&vma->node))
			continue;

		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}

	i915 = to_i915(obj->base.dev);
	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1449
	list_move_tail(&obj->global_link, list);
1450 1451
}

1452
/**
1453 1454
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
1455 1456 1457
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1458 1459 1460
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1461
			  struct drm_file *file)
1462 1463
{
	struct drm_i915_gem_set_domain *args = data;
1464
	struct drm_i915_gem_object *obj;
1465 1466
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
1467
	int err;
1468

1469
	/* Only handle setting domains to types used by the CPU. */
1470
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1471 1472 1473 1474 1475 1476 1477 1478
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

1479
	obj = i915_gem_object_lookup(file, args->handle);
1480 1481
	if (!obj)
		return -ENOENT;
1482

1483 1484 1485 1486
	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
1487
	err = i915_gem_object_wait(obj,
1488 1489 1490 1491
				   I915_WAIT_INTERRUPTIBLE |
				   (write_domain ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1492
	if (err)
C
Chris Wilson 已提交
1493
		goto out;
1494

1495 1496 1497 1498 1499 1500 1501 1502 1503 1504
	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	err = i915_gem_object_pin_pages(obj);
	if (err)
C
Chris Wilson 已提交
1505
		goto out;
1506 1507 1508

	err = i915_mutex_lock_interruptible(dev);
	if (err)
C
Chris Wilson 已提交
1509
		goto out_unpin;
1510

1511
	if (read_domains & I915_GEM_DOMAIN_GTT)
1512
		err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1513
	else
1514
		err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1515

1516 1517
	/* And bump the LRU for this access */
	i915_gem_object_bump_inactive_ggtt(obj);
1518

1519
	mutex_unlock(&dev->struct_mutex);
1520

1521 1522 1523
	if (write_domain != 0)
		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));

C
Chris Wilson 已提交
1524
out_unpin:
1525
	i915_gem_object_unpin_pages(obj);
C
Chris Wilson 已提交
1526 1527
out:
	i915_gem_object_put(obj);
1528
	return err;
1529 1530 1531 1532
}

/**
 * Called when user space has done writes to this buffer
1533 1534 1535
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1536 1537 1538
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1539
			 struct drm_file *file)
1540 1541
{
	struct drm_i915_gem_sw_finish *args = data;
1542
	struct drm_i915_gem_object *obj;
1543
	int err = 0;
1544

1545
	obj = i915_gem_object_lookup(file, args->handle);
1546 1547
	if (!obj)
		return -ENOENT;
1548 1549

	/* Pinned buffers may be scanout, so flush the cache */
1550 1551 1552 1553 1554 1555 1556
	if (READ_ONCE(obj->pin_display)) {
		err = i915_mutex_lock_interruptible(dev);
		if (!err) {
			i915_gem_object_flush_cpu_write_domain(obj);
			mutex_unlock(&dev->struct_mutex);
		}
	}
1557

C
Chris Wilson 已提交
1558
	i915_gem_object_put(obj);
1559
	return err;
1560 1561 1562
}

/**
1563 1564 1565 1566 1567
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1568 1569 1570
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
1571 1572 1573 1574 1575 1576 1577 1578 1579 1580
 *
 * IMPORTANT:
 *
 * DRM driver writers who look a this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
1581 1582 1583
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1584
		    struct drm_file *file)
1585 1586
{
	struct drm_i915_gem_mmap *args = data;
1587
	struct drm_i915_gem_object *obj;
1588 1589
	unsigned long addr;

1590 1591 1592
	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

1593
	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1594 1595
		return -ENODEV;

1596 1597
	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
1598
		return -ENOENT;
1599

1600 1601 1602
	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
1603
	if (!obj->base.filp) {
C
Chris Wilson 已提交
1604
		i915_gem_object_put(obj);
1605 1606 1607
		return -EINVAL;
	}

1608
	addr = vm_mmap(obj->base.filp, 0, args->size,
1609 1610
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
1611 1612 1613 1614
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

1615
		if (down_write_killable(&mm->mmap_sem)) {
C
Chris Wilson 已提交
1616
			i915_gem_object_put(obj);
1617 1618
			return -EINTR;
		}
1619 1620 1621 1622 1623 1624 1625
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);
1626 1627

		/* This may race, but that's ok, it only gets set */
1628
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1629
	}
C
Chris Wilson 已提交
1630
	i915_gem_object_put(obj);
1631 1632 1633 1634 1635 1636 1637 1638
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

1639 1640 1641 1642 1643 1644 1645 1646 1647 1648
static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
	u64 size;

	size = i915_gem_object_get_stride(obj);
	size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;

	return size >> PAGE_SHIFT;
}

1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698
/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though no
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 1;
}

1699 1700
/**
 * i915_gem_fault - fault a page into the GTT
C
Chris Wilson 已提交
1701
 * @area: CPU VMA in question
1702
 * @vmf: fault info
1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
1714 1715 1716
 *
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1717
 */
C
Chris Wilson 已提交
1718
int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
1719
{
1720
#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
C
Chris Wilson 已提交
1721
	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1722
	struct drm_device *dev = obj->base.dev;
1723 1724
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
1725
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
C
Chris Wilson 已提交
1726
	struct i915_vma *vma;
1727
	pgoff_t page_offset;
1728
	unsigned int flags;
1729
	int ret;
1730

1731
	/* We don't use vmf->pgoff since that has the fake offset */
C
Chris Wilson 已提交
1732
	page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
1733 1734
		PAGE_SHIFT;

C
Chris Wilson 已提交
1735 1736
	trace_i915_gem_object_fault(obj, page_offset, true, write);

1737
	/* Try to flush the object off the GPU first without holding the lock.
1738
	 * Upon acquiring the lock, we will perform our sanity checks and then
1739 1740 1741
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
1742 1743 1744 1745
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
1746
	if (ret)
1747 1748
		goto err;

1749 1750 1751 1752
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

1753 1754 1755 1756 1757
	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;
1758

1759 1760
	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev)) {
1761
		ret = -EFAULT;
1762
		goto err_unlock;
1763 1764
	}

1765 1766 1767 1768 1769 1770 1771 1772
	/* If the object is smaller than a couple of partial vma, it is
	 * not worth only creating a single partial vma - we may as well
	 * clear enough space for the full object.
	 */
	flags = PIN_MAPPABLE;
	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
		flags |= PIN_NONBLOCK | PIN_NONFAULT;

1773
	/* Now pin it into the GTT as needed */
1774
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
1775 1776
	if (IS_ERR(vma)) {
		struct i915_ggtt_view view;
1777 1778
		unsigned int chunk_size;

1779
		/* Use a partial view if it is bigger than available space */
1780 1781 1782
		chunk_size = MIN_CHUNK_PAGES;
		if (i915_gem_object_is_tiled(obj))
			chunk_size = max(chunk_size, tile_row_pages(obj));
1783

1784 1785 1786 1787
		memset(&view, 0, sizeof(view));
		view.type = I915_GGTT_VIEW_PARTIAL;
		view.params.partial.offset = rounddown(page_offset, chunk_size);
		view.params.partial.size =
1788
			min_t(unsigned int, chunk_size,
1789
			      vma_pages(area) - view.params.partial.offset);
1790

1791 1792 1793 1794 1795 1796
		/* If the partial covers the entire object, just create a
		 * normal VMA.
		 */
		if (chunk_size >= obj->base.size >> PAGE_SHIFT)
			view.type = I915_GGTT_VIEW_NORMAL;

1797 1798 1799 1800 1801
		/* Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */
		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;

1802 1803
		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	}
C
Chris Wilson 已提交
1804 1805
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
1806
		goto err_unlock;
C
Chris Wilson 已提交
1807
	}
1808

1809 1810
	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
1811
		goto err_unpin;
1812

1813
	ret = i915_vma_get_fence(vma);
1814
	if (ret)
1815
		goto err_unpin;
1816

1817
	/* Mark as being mmapped into userspace for later revocation */
1818
	assert_rpm_wakelock_held(dev_priv);
1819 1820 1821
	if (list_empty(&obj->userfault_link))
		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);

1822
	/* Finally, remap it using the new GTT offset */
1823 1824 1825 1826 1827
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
			       (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->mappable);
1828

1829
err_unpin:
C
Chris Wilson 已提交
1830
	__i915_vma_unpin(vma);
1831
err_unlock:
1832
	mutex_unlock(&dev->struct_mutex);
1833 1834
err_rpm:
	intel_runtime_pm_put(dev_priv);
1835
	i915_gem_object_unpin_pages(obj);
1836
err:
1837
	switch (ret) {
1838
	case -EIO:
1839 1840 1841 1842 1843 1844 1845
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1846 1847 1848
			ret = VM_FAULT_SIGBUS;
			break;
		}
1849
	case -EAGAIN:
D
Daniel Vetter 已提交
1850 1851 1852 1853
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
1854
		 */
1855 1856
	case 0:
	case -ERESTARTSYS:
1857
	case -EINTR:
1858 1859 1860 1861 1862
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
1863 1864
		ret = VM_FAULT_NOPAGE;
		break;
1865
	case -ENOMEM:
1866 1867
		ret = VM_FAULT_OOM;
		break;
1868
	case -ENOSPC:
1869
	case -EFAULT:
1870 1871
		ret = VM_FAULT_SIGBUS;
		break;
1872
	default:
1873
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1874 1875
		ret = VM_FAULT_SIGBUS;
		break;
1876
	}
1877
	return ret;
1878 1879
}

1880 1881 1882 1883
/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
1884
 * Preserve the reservation of the mmapping with the DRM core code, but
1885 1886 1887 1888 1889 1890 1891 1892 1893
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, than pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
1894
void
1895
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
1896
{
1897 1898
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

1899 1900 1901
	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
1902 1903 1904 1905
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
1906
	 */
1907
	lockdep_assert_held(&i915->drm.struct_mutex);
1908
	intel_runtime_pm_get(i915);
1909

1910
	if (list_empty(&obj->userfault_link))
1911
		goto out;
1912

1913
	list_del_init(&obj->userfault_link);
1914 1915
	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);
1916 1917 1918 1919 1920 1921 1922 1923 1924

	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();
1925 1926 1927

out:
	intel_runtime_pm_put(i915);
1928 1929
}

1930
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
1931
{
1932
	struct drm_i915_gem_object *obj, *on;
1933
	int i;
1934

1935 1936 1937 1938 1939 1940
	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */
1941

1942 1943 1944
	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.userfault_list, userfault_link) {
		list_del_init(&obj->userfault_link);
1945 1946 1947
		drm_vma_node_unmap(&obj->base.vma_node,
				   obj->base.dev->anon_inode->i_mapping);
	}
1948 1949 1950 1951 1952 1953 1954 1955 1956 1957 1958 1959 1960 1961 1962 1963 1964

	/* The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		if (WARN_ON(reg->pin_count))
			continue;

		if (!reg->vma)
			continue;

		GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
		reg->dirty = true;
	}
1965 1966
}

1967 1968
/**
 * i915_gem_get_ggtt_size - return required global GTT size for an object
1969
 * @dev_priv: i915 device
1970 1971 1972 1973 1974 1975
 * @size: object size
 * @tiling_mode: tiling mode
 *
 * Return the required global GTT size for an object, taking into account
 * potential fence register mapping.
 */
1976 1977
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
			   u64 size, int tiling_mode)
1978
{
1979
	u64 ggtt_size;
1980

1981 1982
	GEM_BUG_ON(size == 0);

1983
	if (INTEL_GEN(dev_priv) >= 4 ||
1984 1985
	    tiling_mode == I915_TILING_NONE)
		return size;
1986 1987

	/* Previous chips need a power-of-two fence region when tiling */
1988
	if (IS_GEN3(dev_priv))
1989
		ggtt_size = 1024*1024;
1990
	else
1991
		ggtt_size = 512*1024;
1992

1993 1994
	while (ggtt_size < size)
		ggtt_size <<= 1;
1995

1996
	return ggtt_size;
1997 1998
}

1999
/**
2000
 * i915_gem_get_ggtt_alignment - return required global GTT alignment
2001
 * @dev_priv: i915 device
2002 2003
 * @size: object size
 * @tiling_mode: tiling mode
2004
 * @fenced: is fenced alignment required or not
2005
 *
2006
 * Return the required global GTT alignment for an object, taking into account
2007
 * potential fence register mapping.
2008
 */
2009
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
2010
				int tiling_mode, bool fenced)
2011
{
2012 2013
	GEM_BUG_ON(size == 0);

2014 2015 2016 2017
	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
2018
	if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
2019
	    tiling_mode == I915_TILING_NONE)
2020 2021
		return 4096;

2022 2023 2024 2025
	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
2026
	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
2027 2028
}

2029 2030
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
2031
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2032
	int err;
2033

2034 2035 2036
	err = drm_gem_create_mmap_offset(&obj->base);
	if (!err)
		return 0;
2037

2038 2039 2040
	/* We can idle the GPU locklessly to flush stale objects, but in order
	 * to claim that space for ourselves, we need to take the big
	 * struct_mutex to free the requests+objects and allocate our slot.
2041
	 */
2042
	err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
2043 2044 2045 2046 2047 2048 2049 2050 2051
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (!err) {
		i915_gem_retire_requests(dev_priv);
		err = drm_gem_create_mmap_offset(&obj->base);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}
2052

2053
	return err;
2054 2055 2056 2057 2058 2059 2060
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

2061
int
2062 2063
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
2064
		  uint32_t handle,
2065
		  uint64_t *offset)
2066
{
2067
	struct drm_i915_gem_object *obj;
2068 2069
	int ret;

2070
	obj = i915_gem_object_lookup(file, handle);
2071 2072
	if (!obj)
		return -ENOENT;
2073

2074
	ret = i915_gem_object_create_mmap_offset(obj);
2075 2076
	if (ret == 0)
		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2077

C
Chris Wilson 已提交
2078
	i915_gem_object_put(obj);
2079
	return ret;
2080 2081
}

2082 2083 2084 2085 2086 2087 2088 2089 2090 2091 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

2103
	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2104 2105
}

D
Daniel Vetter 已提交
2106 2107 2108
/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2109
{
2110
	i915_gem_object_free_mmap_offset(obj);
2111

2112 2113
	if (obj->base.filp == NULL)
		return;
2114

D
Daniel Vetter 已提交
2115 2116 2117 2118 2119
	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
2120
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
C
Chris Wilson 已提交
2121
	obj->mm.madv = __I915_MADV_PURGED;
D
Daniel Vetter 已提交
2122
}
2123

2124
/* Try to discard unwanted pages */
2125
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
D
Daniel Vetter 已提交
2126
{
2127 2128
	struct address_space *mapping;

2129 2130 2131
	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(obj->mm.pages);

C
Chris Wilson 已提交
2132
	switch (obj->mm.madv) {
2133 2134 2135 2136 2137 2138 2139 2140 2141
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

2142
	mapping = obj->base.filp->f_mapping,
2143
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2144 2145
}

2146
static void
2147 2148
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
			      struct sg_table *pages)
2149
{
2150 2151
	struct sgt_iter sgt_iter;
	struct page *page;
2152

2153
	__i915_gem_object_release_shmem(obj);
2154

2155
	i915_gem_gtt_finish_pages(obj, pages);
I
Imre Deak 已提交
2156

2157
	if (i915_gem_object_needs_bit17_swizzle(obj))
2158
		i915_gem_object_save_bit_17_swizzle(obj, pages);
2159

2160
	for_each_sgt_page(page, sgt_iter, pages) {
C
Chris Wilson 已提交
2161
		if (obj->mm.dirty)
2162
			set_page_dirty(page);
2163

C
Chris Wilson 已提交
2164
		if (obj->mm.madv == I915_MADV_WILLNEED)
2165
			mark_page_accessed(page);
2166

2167
		put_page(page);
2168
	}
C
Chris Wilson 已提交
2169
	obj->mm.dirty = false;
2170

2171 2172
	sg_free_table(pages);
	kfree(pages);
2173
}
C
Chris Wilson 已提交
2174

2175 2176 2177 2178 2179
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void **slot;

C
Chris Wilson 已提交
2180 2181
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2182 2183
}

2184 2185
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				 enum i915_mm_subclass subclass)
2186
{
2187
	struct sg_table *pages;
2188

C
Chris Wilson 已提交
2189
	if (i915_gem_object_has_pinned_pages(obj))
2190
		return;
2191

2192
	GEM_BUG_ON(obj->bind_count);
2193 2194 2195 2196
	if (!READ_ONCE(obj->mm.pages))
		return;

	/* May be called by shrinker from within get_pages() (on another bo) */
2197
	mutex_lock_nested(&obj->mm.lock, subclass);
2198 2199
	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
		goto unlock;
B
Ben Widawsky 已提交
2200

2201 2202 2203
	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
2204 2205
	pages = fetch_and_zero(&obj->mm.pages);
	GEM_BUG_ON(!pages);
2206

C
Chris Wilson 已提交
2207
	if (obj->mm.mapping) {
2208 2209
		void *ptr;

C
Chris Wilson 已提交
2210
		ptr = ptr_mask_bits(obj->mm.mapping);
2211 2212
		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
2213
		else
2214 2215
			kunmap(kmap_to_page(ptr));

C
Chris Wilson 已提交
2216
		obj->mm.mapping = NULL;
2217 2218
	}

2219 2220
	__i915_gem_object_reset_page_iter(obj);

2221
	obj->ops->put_pages(obj, pages);
2222 2223
unlock:
	mutex_unlock(&obj->mm.lock);
C
Chris Wilson 已提交
2224 2225
}

2226
static unsigned int swiotlb_max_size(void)
2227 2228 2229 2230 2231 2232 2233 2234
{
#if IS_ENABLED(CONFIG_SWIOTLB)
	return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
#else
	return 0;
#endif
}

2235
static struct sg_table *
C
Chris Wilson 已提交
2236
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2237
{
2238
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2239 2240
	int page_count, i;
	struct address_space *mapping;
2241 2242
	struct sg_table *st;
	struct scatterlist *sg;
2243
	struct sgt_iter sgt_iter;
2244
	struct page *page;
2245
	unsigned long last_pfn = 0;	/* suppress gcc warning */
2246
	unsigned int max_segment;
I
Imre Deak 已提交
2247
	int ret;
C
Chris Wilson 已提交
2248
	gfp_t gfp;
2249

C
Chris Wilson 已提交
2250 2251 2252 2253
	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
2254 2255
	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
C
Chris Wilson 已提交
2256

2257 2258
	max_segment = swiotlb_max_size();
	if (!max_segment)
2259
		max_segment = rounddown(UINT_MAX, PAGE_SIZE);
2260

2261 2262
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
2263
		return ERR_PTR(-ENOMEM);
2264

2265
	page_count = obj->base.size / PAGE_SIZE;
2266 2267
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
2268
		return ERR_PTR(-ENOMEM);
2269
	}
2270

2271 2272 2273 2274 2275
	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
2276
	mapping = obj->base.filp->f_mapping;
2277
	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2278
	gfp |= __GFP_NORETRY | __GFP_NOWARN;
2279 2280 2281
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
C
Chris Wilson 已提交
2282 2283
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
2284 2285 2286 2287 2288
			i915_gem_shrink(dev_priv,
					page_count,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
C
Chris Wilson 已提交
2289 2290 2291 2292 2293 2294 2295
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
2296
			page = shmem_read_mapping_page(mapping, i);
I
Imre Deak 已提交
2297 2298
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
C
Chris Wilson 已提交
2299
				goto err_pages;
I
Imre Deak 已提交
2300
			}
C
Chris Wilson 已提交
2301
		}
2302 2303 2304
		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
2305 2306 2307 2308 2309 2310 2311 2312
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);
2313 2314 2315

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2316
	}
2317
	if (sg) /* loop terminated early; short sg table */
2318
		sg_mark_end(sg);
2319

2320
	ret = i915_gem_gtt_prepare_pages(obj, st);
I
Imre Deak 已提交
2321 2322 2323
	if (ret)
		goto err_pages;

2324
	if (i915_gem_object_needs_bit17_swizzle(obj))
2325
		i915_gem_object_do_bit_17_swizzle(obj, st);
2326

2327
	if (i915_gem_object_is_tiled(obj) &&
2328
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
C
Chris Wilson 已提交
2329
		__i915_gem_object_pin_pages(obj);
2330 2331
		obj->mm.quirked = true;
	}
2332

2333
	return st;
2334 2335

err_pages:
2336
	sg_mark_end(sg);
2337 2338
	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);
2339 2340
	sg_free_table(st);
	kfree(st);
2341 2342 2343 2344 2345 2346 2347 2348 2349

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
I
Imre Deak 已提交
2350 2351 2352
	if (ret == -ENOSPC)
		ret = -ENOMEM;

2353 2354 2355 2356 2357 2358
	return ERR_PTR(ret);
}

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
2359
	lockdep_assert_held(&obj->mm.lock);
2360 2361 2362 2363 2364 2365 2366 2367 2368 2369 2370 2371 2372 2373 2374 2375 2376 2377 2378 2379 2380 2381

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;
}

static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	pages = obj->ops->get_pages(obj);
	if (unlikely(IS_ERR(pages)))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages);
	return 0;
2382 2383
}

2384
/* Ensure that the associated pages are gathered from the backing storage
2385
 * and pinned into our object. i915_gem_object_pin_pages() may be called
2386
 * multiple times before they are released by a single call to
2387
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
2388 2389 2390
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
C
Chris Wilson 已提交
2391
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2392
{
2393
	int err;
2394

2395 2396 2397
	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return err;
2398

2399 2400 2401 2402 2403 2404
	if (likely(obj->mm.pages)) {
		__i915_gem_object_pin_pages(obj);
		goto unlock;
	}

	GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2405

2406
	err = ____i915_gem_object_get_pages(obj);
2407 2408
	if (!err)
		atomic_set_release(&obj->mm.pages_pin_count, 1);
2409

2410 2411
unlock:
	mutex_unlock(&obj->mm.lock);
2412
	return err;
2413 2414
}

2415
/* The 'mapping' part of i915_gem_object_pin_map() below */
2416 2417
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
2418 2419
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
C
Chris Wilson 已提交
2420
	struct sg_table *sgt = obj->mm.pages;
2421 2422
	struct sgt_iter sgt_iter;
	struct page *page;
2423 2424
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
2425
	unsigned long i = 0;
2426
	pgprot_t pgprot;
2427 2428 2429
	void *addr;

	/* A single page can always be kmapped */
2430
	if (n_pages == 1 && type == I915_MAP_WB)
2431 2432
		return kmap(sg_page(sgt->sgl));

2433 2434 2435 2436 2437 2438
	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
		if (!pages)
			return NULL;
	}
2439

2440 2441
	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;
2442 2443 2444 2445

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

2446 2447 2448 2449 2450 2451 2452 2453 2454
	switch (type) {
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);
2455

2456 2457
	if (pages != stack_pages)
		drm_free_large(pages);
2458 2459 2460 2461 2462

	return addr;
}

/* get, pin, and map the pages of the object into kernel space */
2463 2464
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
2465
{
2466 2467 2468
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
2469 2470
	int ret;

2471
	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
2472

2473
	ret = mutex_lock_interruptible(&obj->mm.lock);
2474 2475 2476
	if (ret)
		return ERR_PTR(ret);

2477 2478 2479 2480 2481 2482 2483 2484 2485 2486 2487
	pinned = true;
	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
		ret = ____i915_gem_object_get_pages(obj);
		if (ret)
			goto err_unlock;

		GEM_BUG_ON(atomic_read(&obj->mm.pages_pin_count));
		atomic_set_release(&obj->mm.pages_pin_count, 1);
		pinned = false;
	}
	GEM_BUG_ON(!obj->mm.pages);
2488

C
Chris Wilson 已提交
2489
	ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
2490 2491 2492
	if (ptr && has_type != type) {
		if (pinned) {
			ret = -EBUSY;
2493
			goto err_unpin;
2494
		}
2495 2496 2497 2498 2499 2500

		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

C
Chris Wilson 已提交
2501
		ptr = obj->mm.mapping = NULL;
2502 2503
	}

2504 2505 2506 2507
	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			ret = -ENOMEM;
2508
			goto err_unpin;
2509 2510
		}

C
Chris Wilson 已提交
2511
		obj->mm.mapping = ptr_pack_bits(ptr, type);
2512 2513
	}

2514 2515
out_unlock:
	mutex_unlock(&obj->mm.lock);
2516 2517
	return ptr;

2518 2519 2520 2521 2522
err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(ret);
	goto out_unlock;
2523 2524
}

2525
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
2526
{
2527
	unsigned long elapsed;
2528

2529
	if (ctx->hang_stats.banned)
2530 2531
		return true;

2532
	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2533 2534
	if (ctx->hang_stats.ban_period_seconds &&
	    elapsed <= ctx->hang_stats.ban_period_seconds) {
2535 2536
		DRM_DEBUG("context hanging too fast, banning!\n");
		return true;
2537 2538 2539 2540 2541
	}

	return false;
}

2542
static void i915_set_reset_status(struct i915_gem_context *ctx,
2543
				  const bool guilty)
2544
{
2545
	struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
2546 2547

	if (guilty) {
2548
		hs->banned = i915_context_is_banned(ctx);
2549 2550 2551 2552
		hs->batch_active++;
		hs->guilty_ts = get_seconds();
	} else {
		hs->batch_pending++;
2553 2554 2555
	}
}

2556
struct drm_i915_gem_request *
2557
i915_gem_find_active_request(struct intel_engine_cs *engine)
2558
{
2559 2560
	struct drm_i915_gem_request *request;

2561 2562 2563 2564 2565 2566 2567 2568
	/* We are called by the error capture and reset at a random
	 * point in time. In particular, note that neither is crucially
	 * ordered with an interrupt. After a hang, the GPU is dead and we
	 * assume that no more writes can happen (we waited long enough for
	 * all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 */
2569
	list_for_each_entry(request, &engine->timeline->requests, link) {
C
Chris Wilson 已提交
2570
		if (__i915_gem_request_completed(request))
2571
			continue;
2572

2573
		return request;
2574
	}
2575 2576 2577 2578

	return NULL;
}

2579 2580 2581 2582 2583 2584 2585 2586 2587 2588 2589 2590 2591 2592 2593 2594 2595 2596
static void reset_request(struct drm_i915_gem_request *request)
{
	void *vaddr = request->ring->vaddr;
	u32 head;

	/* As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = request->head;
	if (request->postfix < head) {
		memset(vaddr + head, 0, request->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, request->postfix - head);
}

static void i915_gem_reset_engine(struct intel_engine_cs *engine)
2597 2598
{
	struct drm_i915_gem_request *request;
2599
	struct i915_gem_context *incomplete_ctx;
C
Chris Wilson 已提交
2600
	struct intel_timeline *timeline;
2601 2602
	bool ring_hung;

2603 2604 2605
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);

2606
	request = i915_gem_find_active_request(engine);
2607
	if (!request)
2608 2609
		return;

2610
	ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
2611 2612 2613
	if (engine->hangcheck.seqno != intel_engine_get_seqno(engine))
		ring_hung = false;

2614
	i915_set_reset_status(request->ctx, ring_hung);
2615 2616 2617 2618
	if (!ring_hung)
		return;

	DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
2619
			 engine->name, request->global_seqno);
2620 2621 2622 2623 2624 2625 2626 2627 2628 2629 2630 2631 2632 2633 2634 2635

	/* Setup the CS to resume from the breadcrumb of the hung request */
	engine->reset_hw(engine, request);

	/* Users of the default context do not rely on logical state
	 * preserved between batches. They have to emit full state on
	 * every batch and so it is safe to execute queued requests following
	 * the hang.
	 *
	 * Other contexts preserve state, now corrupt. We want to skip all
	 * queued requests that reference the corrupt context.
	 */
	incomplete_ctx = request->ctx;
	if (i915_gem_context_is_default(incomplete_ctx))
		return;

2636
	list_for_each_entry_continue(request, &engine->timeline->requests, link)
2637 2638
		if (request->ctx == incomplete_ctx)
			reset_request(request);
C
Chris Wilson 已提交
2639 2640 2641 2642

	timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
	list_for_each_entry(request, &timeline->requests, link)
		reset_request(request);
2643
}
2644

2645
void i915_gem_reset(struct drm_i915_private *dev_priv)
2646
{
2647
	struct intel_engine_cs *engine;
2648
	enum intel_engine_id id;
2649

2650 2651
	lockdep_assert_held(&dev_priv->drm.struct_mutex);

2652 2653
	i915_gem_retire_requests(dev_priv);

2654
	for_each_engine(engine, dev_priv, id)
2655 2656 2657
		i915_gem_reset_engine(engine);

	i915_gem_restore_fences(&dev_priv->drm);
2658 2659 2660 2661 2662 2663 2664

	if (dev_priv->gt.awake) {
		intel_sanitize_gt_powersave(dev_priv);
		intel_enable_gt_powersave(dev_priv);
		if (INTEL_GEN(dev_priv) >= 6)
			gen6_rps_busy(dev_priv);
	}
2665 2666 2667 2668 2669 2670 2671 2672 2673
}

static void nop_submit_request(struct drm_i915_gem_request *request)
{
}

static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
{
	engine->submit_request = nop_submit_request;
2674

2675 2676 2677 2678
	/* Mark all pending requests as complete so that any concurrent
	 * (lockless) lookup doesn't try and wait upon the request as we
	 * reset it.
	 */
2679
	intel_engine_init_global_seqno(engine,
2680
				       intel_engine_last_submit(engine));
2681

2682 2683 2684 2685 2686 2687
	/*
	 * Clear the execlists queue up before freeing the requests, as those
	 * are the ones that keep the context and ringbuffer backing objects
	 * pinned in place.
	 */

2688
	if (i915.enable_execlists) {
2689 2690 2691 2692 2693 2694
		spin_lock(&engine->execlist_lock);
		INIT_LIST_HEAD(&engine->execlist_queue);
		i915_gem_request_put(engine->execlist_port[0].request);
		i915_gem_request_put(engine->execlist_port[1].request);
		memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
		spin_unlock(&engine->execlist_lock);
2695
	}
2696 2697
}

2698
void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
2699
{
2700
	struct intel_engine_cs *engine;
2701
	enum intel_engine_id id;
2702

2703 2704
	lockdep_assert_held(&dev_priv->drm.struct_mutex);
	set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
2705

2706
	i915_gem_context_lost(dev_priv);
2707
	for_each_engine(engine, dev_priv, id)
2708
		i915_gem_cleanup_engine(engine);
2709
	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2710

2711
	i915_gem_retire_requests(dev_priv);
2712 2713
}

2714
static void
2715 2716
i915_gem_retire_work_handler(struct work_struct *work)
{
2717
	struct drm_i915_private *dev_priv =
2718
		container_of(work, typeof(*dev_priv), gt.retire_work.work);
2719
	struct drm_device *dev = &dev_priv->drm;
2720

2721
	/* Come back later if the device is busy... */
2722
	if (mutex_trylock(&dev->struct_mutex)) {
2723
		i915_gem_retire_requests(dev_priv);
2724
		mutex_unlock(&dev->struct_mutex);
2725
	}
2726 2727 2728 2729 2730

	/* Keep the retire handler running until we are finally idle.
	 * We do not need to do this test under locking as in the worst-case
	 * we queue the retire worker once too often.
	 */
2731 2732
	if (READ_ONCE(dev_priv->gt.awake)) {
		i915_queue_hangcheck(dev_priv);
2733 2734
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.retire_work,
2735
				   round_jiffies_up_relative(HZ));
2736
	}
2737
}
2738

2739 2740 2741 2742
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
2743
		container_of(work, typeof(*dev_priv), gt.idle_work.work);
2744
	struct drm_device *dev = &dev_priv->drm;
2745
	struct intel_engine_cs *engine;
2746
	enum intel_engine_id id;
2747 2748 2749 2750 2751
	bool rearm_hangcheck;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

2752
	if (READ_ONCE(dev_priv->gt.active_requests))
2753 2754 2755 2756 2757 2758 2759 2760 2761 2762 2763 2764 2765
		return;

	rearm_hangcheck =
		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	if (!mutex_trylock(&dev->struct_mutex)) {
		/* Currently busy, come back later */
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->gt.idle_work,
				 msecs_to_jiffies(50));
		goto out_rearm;
	}

2766
	if (dev_priv->gt.active_requests)
2767
		goto out_unlock;
2768

2769
	for_each_engine(engine, dev_priv, id)
2770
		i915_gem_batch_pool_fini(&engine->batch_pool);
2771

2772 2773 2774
	GEM_BUG_ON(!dev_priv->gt.awake);
	dev_priv->gt.awake = false;
	rearm_hangcheck = false;
2775

2776 2777 2778 2779 2780
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_idle(dev_priv);
	intel_runtime_pm_put(dev_priv);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
2781

2782 2783 2784 2785
out_rearm:
	if (rearm_hangcheck) {
		GEM_BUG_ON(!dev_priv->gt.awake);
		i915_queue_hangcheck(dev_priv);
2786
	}
2787 2788
}

2789 2790 2791 2792 2793 2794 2795 2796 2797 2798
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_vma *vma, *vn;

	mutex_lock(&obj->base.dev->struct_mutex);
	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
		if (vma->vm->file == fpriv)
			i915_vma_close(vma);
2799 2800 2801 2802 2803 2804

	if (i915_gem_object_is_active(obj) &&
	    !i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_set_active_reference(obj);
		i915_gem_object_get(obj);
	}
2805 2806 2807
	mutex_unlock(&obj->base.dev->struct_mutex);
}

2808 2809 2810 2811 2812 2813 2814 2815 2816 2817 2818
static unsigned long to_wait_timeout(s64 timeout_ns)
{
	if (timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;

	if (timeout_ns == 0)
		return 0;

	return nsecs_to_jiffies_timeout(timeout_ns);
}

2819 2820
/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2821 2822 2823
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
2824 2825 2826 2827 2828 2829 2830 2831 2832 2833 2834 2835 2836 2837 2838 2839 2840 2841 2842 2843 2844 2845 2846 2847
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENONENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter * race condition exists in the busy
 * ioctl
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
2848 2849
	ktime_t start;
	long ret;
2850

2851 2852 2853
	if (args->flags != 0)
		return -EINVAL;

2854
	obj = i915_gem_object_lookup(file, args->bo_handle);
2855
	if (!obj)
2856 2857
		return -ENOENT;

2858 2859 2860 2861 2862 2863 2864 2865 2866 2867 2868
	start = ktime_get();

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
				   to_wait_timeout(args->timeout_ns),
				   to_rps_client(file));

	if (args->timeout_ns > 0) {
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;
2869 2870
	}

C
Chris Wilson 已提交
2871
	i915_gem_object_put(obj);
2872
	return ret;
2873 2874
}

2875 2876
static void __i915_vma_iounmap(struct i915_vma *vma)
{
2877
	GEM_BUG_ON(i915_vma_is_pinned(vma));
2878 2879 2880 2881 2882 2883 2884 2885

	if (vma->iomap == NULL)
		return;

	io_mapping_unmap(vma->iomap);
	vma->iomap = NULL;
}

2886
int i915_vma_unbind(struct i915_vma *vma)
2887
{
2888
	struct drm_i915_gem_object *obj = vma->obj;
2889
	unsigned long active;
2890
	int ret;
2891

2892 2893
	lockdep_assert_held(&obj->base.dev->struct_mutex);

2894 2895 2896 2897
	/* First wait upon any activity as retiring the request may
	 * have side-effects such as unpinning or even unbinding this vma.
	 */
	active = i915_vma_get_active(vma);
2898
	if (active) {
2899 2900
		int idx;

2901 2902 2903 2904
		/* When a closed VMA is retired, it is unbound - eek.
		 * In order to prevent it from being recursively closed,
		 * take a pin on the vma so that the second unbind is
		 * aborted.
2905 2906 2907 2908 2909 2910 2911
		 *
		 * Even more scary is that the retire callback may free
		 * the object (last active vma). To prevent the explosion
		 * we defer the actual object free to a worker that can
		 * only proceed once it acquires the struct_mutex (which
		 * we currently hold, therefore it cannot free this object
		 * before we are finished).
2912
		 */
2913
		__i915_vma_pin(vma);
2914

2915 2916 2917 2918
		for_each_active(active, idx) {
			ret = i915_gem_active_retire(&vma->last_read[idx],
						   &vma->vm->dev->struct_mutex);
			if (ret)
2919
				break;
2920 2921
		}

2922
		__i915_vma_unpin(vma);
2923 2924 2925
		if (ret)
			return ret;

2926 2927 2928
		GEM_BUG_ON(i915_vma_is_active(vma));
	}

2929
	if (i915_vma_is_pinned(vma))
2930 2931
		return -EBUSY;

2932 2933
	if (!drm_mm_node_allocated(&vma->node))
		goto destroy;
2934

2935
	GEM_BUG_ON(obj->bind_count == 0);
C
Chris Wilson 已提交
2936
	GEM_BUG_ON(!obj->mm.pages);
2937

2938
	if (i915_vma_is_map_and_fenceable(vma)) {
2939
		/* release the fence reg _after_ flushing */
2940
		ret = i915_vma_put_fence(vma);
2941 2942
		if (ret)
			return ret;
2943

2944 2945 2946
		/* Force a pagefault for domain tracking on next user access */
		i915_gem_release_mmap(obj);

2947
		__i915_vma_iounmap(vma);
2948
		vma->flags &= ~I915_VMA_CAN_FENCE;
2949
	}
2950

2951 2952 2953 2954
	if (likely(!vma->vm->closed)) {
		trace_i915_vma_unbind(vma);
		vma->vm->unbind_vma(vma);
	}
2955
	vma->flags &= ~(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
2956

2957 2958 2959
	drm_mm_remove_node(&vma->node);
	list_move_tail(&vma->vm_link, &vma->vm->unbound_list);

C
Chris Wilson 已提交
2960
	if (vma->pages != obj->mm.pages) {
2961 2962 2963
		GEM_BUG_ON(!vma->pages);
		sg_free_table(vma->pages);
		kfree(vma->pages);
2964
	}
2965
	vma->pages = NULL;
2966

B
Ben Widawsky 已提交
2967
	/* Since the unbound list is global, only move to that list if
2968
	 * no more VMAs exist. */
2969
	if (--obj->bind_count == 0)
2970
		list_move_tail(&obj->global_link,
2971
			       &to_i915(obj->base.dev)->mm.unbound_list);
2972

2973 2974 2975 2976 2977 2978
	/* And finally now the object is completely decoupled from this vma,
	 * we can drop its hold on the backing storage and allow it to be
	 * reaped by the shrinker.
	 */
	i915_gem_object_unpin_pages(obj);

2979
destroy:
2980
	if (unlikely(i915_vma_is_closed(vma)))
2981 2982
		i915_vma_destroy(vma);

2983
	return 0;
2984 2985
}

2986
static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
2987
{
2988
	int ret, i;
2989

2990 2991 2992 2993 2994
	for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
		ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
		if (ret)
			return ret;
	}
2995

2996 2997 2998 2999 3000 3001 3002 3003 3004 3005
	return 0;
}

int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
	struct i915_gem_timeline *tl;
	int ret;

	list_for_each_entry(tl, &i915->gt.timelines, link) {
		ret = wait_for_timeline(tl, flags);
3006 3007 3008
		if (ret)
			return ret;
	}
3009

3010
	return 0;
3011 3012
}

3013
static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
3014 3015
				     unsigned long cache_level)
{
3016
	struct drm_mm_node *gtt_space = &vma->node;
3017 3018
	struct drm_mm_node *other;

3019 3020 3021 3022 3023 3024
	/*
	 * On some machines we have to be careful when putting differing types
	 * of snoopable memory together to avoid the prefetcher crossing memory
	 * domains and dying. During vm initialisation, we decide whether or not
	 * these constraints apply and set the drm_mm.color_adjust
	 * appropriately.
3025
	 */
3026
	if (vma->vm->mm.color_adjust == NULL)
3027 3028
		return true;

3029
	if (!drm_mm_node_allocated(gtt_space))
3030 3031 3032 3033 3034 3035 3036 3037 3038 3039 3040 3041 3042 3043 3044 3045
		return true;

	if (list_empty(&gtt_space->node_list))
		return true;

	other = list_entry(gtt_space->node_list.prev, struct drm_mm_node, node_list);
	if (other->allocated && !other->hole_follows && other->color != cache_level)
		return false;

	other = list_entry(gtt_space->node_list.next, struct drm_mm_node, node_list);
	if (other->allocated && !gtt_space->hole_follows && other->color != cache_level)
		return false;

	return true;
}

3046
/**
3047 3048
 * i915_vma_insert - finds a slot for the vma in its address space
 * @vma: the vma
3049
 * @size: requested size in bytes (can be larger than the VMA)
3050
 * @alignment: required alignment
3051
 * @flags: mask of PIN_* flags to use
3052 3053 3054 3055 3056 3057 3058
 *
 * First we try to allocate some free space that meets the requirements for
 * the VMA. Failiing that, if the flags permit, it will evict an old VMA,
 * preferrably the oldest idle entry to make room for the new VMA.
 *
 * Returns:
 * 0 on success, negative error code otherwise.
3059
 */
3060 3061
static int
i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
3062
{
3063 3064
	struct drm_i915_private *dev_priv = to_i915(vma->vm->dev);
	struct drm_i915_gem_object *obj = vma->obj;
3065
	u64 start, end;
3066
	int ret;
3067

3068
	GEM_BUG_ON(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
3069
	GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
3070 3071 3072

	size = max(size, vma->size);
	if (flags & PIN_MAPPABLE)
3073 3074
		size = i915_gem_get_ggtt_size(dev_priv, size,
					      i915_gem_object_get_tiling(obj));
3075

3076 3077 3078 3079
	alignment = max(max(alignment, vma->display_alignment),
			i915_gem_get_ggtt_alignment(dev_priv, size,
						    i915_gem_object_get_tiling(obj),
						    flags & PIN_MAPPABLE));
3080

3081
	start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
3082 3083

	end = vma->vm->total;
3084
	if (flags & PIN_MAPPABLE)
3085
		end = min_t(u64, end, dev_priv->ggtt.mappable_end);
3086
	if (flags & PIN_ZONE_4G)
3087
		end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
3088

3089 3090 3091
	/* If binding the object/GGTT view requires more space than the entire
	 * aperture has, reject it early before evicting everything in a vain
	 * attempt to find space.
3092
	 */
3093
	if (size > end) {
3094
		DRM_DEBUG("Attempting to bind an object larger than the aperture: request=%llu [object=%zd] > %s aperture=%llu\n",
3095
			  size, obj->base.size,
3096
			  flags & PIN_MAPPABLE ? "mappable" : "total",
3097
			  end);
3098
		return -E2BIG;
3099 3100
	}

C
Chris Wilson 已提交
3101
	ret = i915_gem_object_pin_pages(obj);
C
Chris Wilson 已提交
3102
	if (ret)
3103
		return ret;
C
Chris Wilson 已提交
3104

3105
	if (flags & PIN_OFFSET_FIXED) {
3106
		u64 offset = flags & PIN_OFFSET_MASK;
3107
		if (offset & (alignment - 1) || offset > end - size) {
3108
			ret = -EINVAL;
3109
			goto err_unpin;
3110
		}
3111

3112 3113 3114
		vma->node.start = offset;
		vma->node.size = size;
		vma->node.color = obj->cache_level;
3115
		ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
3116 3117 3118
		if (ret) {
			ret = i915_gem_evict_for_vma(vma);
			if (ret == 0)
3119 3120 3121
				ret = drm_mm_reserve_node(&vma->vm->mm, &vma->node);
			if (ret)
				goto err_unpin;
3122
		}
3123
	} else {
3124 3125
		u32 search_flag, alloc_flag;

3126 3127 3128 3129 3130 3131 3132
		if (flags & PIN_HIGH) {
			search_flag = DRM_MM_SEARCH_BELOW;
			alloc_flag = DRM_MM_CREATE_TOP;
		} else {
			search_flag = DRM_MM_SEARCH_DEFAULT;
			alloc_flag = DRM_MM_CREATE_DEFAULT;
		}
3133

3134 3135 3136 3137 3138 3139 3140 3141 3142
		/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
		 * so we know that we always have a minimum alignment of 4096.
		 * The drm_mm range manager is optimised to return results
		 * with zero alignment, so where possible use the optimal
		 * path.
		 */
		if (alignment <= 4096)
			alignment = 0;

3143
search_free:
3144 3145
		ret = drm_mm_insert_node_in_range_generic(&vma->vm->mm,
							  &vma->node,
3146 3147 3148 3149 3150 3151
							  size, alignment,
							  obj->cache_level,
							  start, end,
							  search_flag,
							  alloc_flag);
		if (ret) {
3152
			ret = i915_gem_evict_something(vma->vm, size, alignment,
3153 3154 3155 3156 3157
						       obj->cache_level,
						       start, end,
						       flags);
			if (ret == 0)
				goto search_free;
3158

3159
			goto err_unpin;
3160
		}
3161 3162 3163

		GEM_BUG_ON(vma->node.start < start);
		GEM_BUG_ON(vma->node.start + vma->node.size > end);
3164
	}
3165
	GEM_BUG_ON(!i915_gem_valid_gtt_space(vma, obj->cache_level));
3166

3167
	list_move_tail(&obj->global_link, &dev_priv->mm.bound_list);
3168
	list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3169
	obj->bind_count++;
3170

3171
	return 0;
B
Ben Widawsky 已提交
3172

3173
err_unpin:
B
Ben Widawsky 已提交
3174
	i915_gem_object_unpin_pages(obj);
3175
	return ret;
3176 3177
}

3178
bool
3179 3180
i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			bool force)
3181 3182 3183 3184 3185
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
C
Chris Wilson 已提交
3186
	if (!obj->mm.pages)
3187
		return false;
3188

3189 3190 3191 3192
	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
3193
	if (obj->stolen || obj->phys_handle)
3194
		return false;
3195

3196 3197 3198 3199 3200 3201 3202 3203
	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
3204 3205
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
		obj->cache_dirty = true;
3206
		return false;
3207
	}
3208

C
Chris Wilson 已提交
3209
	trace_i915_gem_object_clflush(obj);
C
Chris Wilson 已提交
3210
	drm_clflush_sg(obj->mm.pages);
3211
	obj->cache_dirty = false;
3212 3213

	return true;
3214 3215 3216 3217
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
3218
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3219
{
3220
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
C
Chris Wilson 已提交
3221

3222
	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3223 3224
		return;

3225
	/* No actual flushing is required for the GTT write domain.  Writes
3226
	 * to it "immediately" go to main memory as far as we know, so there's
3227
	 * no chipset flush.  It also doesn't land in render cache.
3228 3229 3230 3231
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
3232 3233 3234 3235 3236 3237 3238
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour).
3239
	 */
3240
	wmb();
3241
	if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
3242
		POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
3243

3244
	intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
3245

3246
	obj->base.write_domain = 0;
C
Chris Wilson 已提交
3247
	trace_i915_gem_object_change_domain(obj,
3248
					    obj->base.read_domains,
3249
					    I915_GEM_DOMAIN_GTT);
3250 3251 3252 3253
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
3254
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3255
{
3256
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3257 3258
		return;

3259
	if (i915_gem_clflush_object(obj, obj->pin_display))
3260
		i915_gem_chipset_flush(to_i915(obj->base.dev));
3261

3262
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3263

3264
	obj->base.write_domain = 0;
C
Chris Wilson 已提交
3265
	trace_i915_gem_object_change_domain(obj,
3266
					    obj->base.read_domains,
3267
					    I915_GEM_DOMAIN_CPU);
3268 3269
}

3270 3271
/**
 * Moves a single object to the GTT read, and possibly write domain.
3272 3273
 * @obj: object to act on
 * @write: ask for write access or read only
3274 3275 3276 3277
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
J
Jesse Barnes 已提交
3278
int
3279
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3280
{
C
Chris Wilson 已提交
3281
	uint32_t old_write_domain, old_read_domains;
3282
	int ret;
3283

3284
	lockdep_assert_held(&obj->base.dev->struct_mutex);
3285

3286 3287 3288 3289 3290 3291
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
3292 3293 3294
	if (ret)
		return ret;

3295 3296 3297
	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

3298 3299 3300 3301 3302 3303 3304 3305
	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
C
Chris Wilson 已提交
3306
	ret = i915_gem_object_pin_pages(obj);
3307 3308 3309
	if (ret)
		return ret;

3310
	i915_gem_object_flush_cpu_write_domain(obj);
C
Chris Wilson 已提交
3311

3312 3313 3314 3315 3316 3317 3318
	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

3319 3320
	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;
C
Chris Wilson 已提交
3321

3322 3323 3324
	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
3325
	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3326
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3327
	if (write) {
3328 3329
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
C
Chris Wilson 已提交
3330
		obj->mm.dirty = true;
3331 3332
	}

C
Chris Wilson 已提交
3333 3334 3335 3336
	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

C
Chris Wilson 已提交
3337
	i915_gem_object_unpin_pages(obj);
3338 3339 3340
	return 0;
}

3341 3342
/**
 * Changes the cache-level of an object across all VMA.
3343 3344
 * @obj: object to act on
 * @cache_level: new cache level to set for the object
3345 3346 3347 3348 3349 3350 3351 3352 3353 3354 3355
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
3356 3357 3358
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
3359
	struct i915_vma *vma;
3360
	int ret = 0;
3361

3362 3363
	lockdep_assert_held(&obj->base.dev->struct_mutex);

3364
	if (obj->cache_level == cache_level)
3365
		goto out;
3366

3367 3368 3369 3370 3371
	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
3372 3373
restart:
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
3374 3375 3376
		if (!drm_mm_node_allocated(&vma->node))
			continue;

3377
		if (i915_vma_is_pinned(vma)) {
3378 3379 3380 3381
			DRM_DEBUG("can not change the cache level of pinned objects\n");
			return -EBUSY;
		}

3382 3383 3384 3385 3386 3387 3388 3389 3390 3391 3392 3393
		if (i915_gem_valid_gtt_space(vma, cache_level))
			continue;

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;

		/* As unbinding may affect other elements in the
		 * obj->vma_list (due to side-effects from retiring
		 * an active vma), play safe and restart the iterator.
		 */
		goto restart;
3394 3395
	}

3396 3397 3398 3399 3400 3401 3402
	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However since
	 * we already have a valid slot, dma mapping, pages etc, we may as
	 * rewrite the PTE in the belief that doing so tramples upon less
	 * state and so involves less work.
	 */
3403
	if (obj->bind_count) {
3404 3405 3406 3407
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
		 */
3408 3409 3410 3411 3412 3413
		ret = i915_gem_object_wait(obj,
					   I915_WAIT_INTERRUPTIBLE |
					   I915_WAIT_LOCKED |
					   I915_WAIT_ALL,
					   MAX_SCHEDULE_TIMEOUT,
					   NULL);
3414 3415 3416
		if (ret)
			return ret;

3417
		if (!HAS_LLC(obj->base.dev) && cache_level != I915_CACHE_NONE) {
3418 3419 3420 3421 3422 3423 3424 3425 3426 3427 3428 3429 3430 3431 3432 3433
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmaping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
			i915_gem_release_mmap(obj);

			/* As we no longer need a fence for GTT access,
			 * we can relinquish it now (and so prevent having
			 * to steal a fence from someone else on the next
			 * fence request). Note GPU activity would have
			 * dropped the fence as all snoopable access is
			 * supposed to be linear.
			 */
3434 3435 3436 3437 3438
			list_for_each_entry(vma, &obj->vma_list, obj_link) {
				ret = i915_vma_put_fence(vma);
				if (ret)
					return ret;
			}
3439 3440 3441 3442 3443 3444 3445 3446
		} else {
			/* We either have incoherent backing store and
			 * so no GTT access or the architecture is fully
			 * coherent. In such cases, existing GTT mmaps
			 * ignore the cache bit in the PTE and we can
			 * rewrite it without confusing the GPU or having
			 * to force userspace to fault back in its mmaps.
			 */
3447 3448
		}

3449
		list_for_each_entry(vma, &obj->vma_list, obj_link) {
3450 3451 3452 3453 3454 3455 3456
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
			if (ret)
				return ret;
		}
3457 3458
	}

3459
	list_for_each_entry(vma, &obj->vma_list, obj_link)
3460 3461 3462
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

3463
out:
3464 3465 3466 3467
	/* Flush the dirty CPU caches to the backing storage so that the
	 * object is now coherent at its new cache level (with respect
	 * to the access domain).
	 */
3468
	if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
3469
		if (i915_gem_clflush_object(obj, true))
3470
			i915_gem_chipset_flush(to_i915(obj->base.dev));
3471 3472 3473 3474 3475
	}

	return 0;
}

B
Ben Widawsky 已提交
3476 3477
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
3478
{
B
Ben Widawsky 已提交
3479
	struct drm_i915_gem_caching *args = data;
3480
	struct drm_i915_gem_object *obj;
3481
	int err = 0;
3482

3483 3484 3485 3486 3487 3488
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj) {
		err = -ENOENT;
		goto out;
	}
3489

3490 3491 3492 3493 3494 3495
	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

3496 3497 3498 3499
	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

3500 3501 3502 3503
	default:
		args->caching = I915_CACHING_NONE;
		break;
	}
3504 3505 3506
out:
	rcu_read_unlock();
	return err;
3507 3508
}

B
Ben Widawsky 已提交
3509 3510
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
3511
{
3512
	struct drm_i915_private *i915 = to_i915(dev);
B
Ben Widawsky 已提交
3513
	struct drm_i915_gem_caching *args = data;
3514 3515 3516 3517
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

B
Ben Widawsky 已提交
3518 3519
	switch (args->caching) {
	case I915_CACHING_NONE:
3520 3521
		level = I915_CACHE_NONE;
		break;
B
Ben Widawsky 已提交
3522
	case I915_CACHING_CACHED:
3523 3524 3525 3526 3527 3528
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
3529
		if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3530 3531
			return -ENODEV;

3532 3533
		level = I915_CACHE_LLC;
		break;
3534
	case I915_CACHING_DISPLAY:
3535
		level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3536
		break;
3537 3538 3539 3540
	default:
		return -EINVAL;
	}

B
Ben Widawsky 已提交
3541 3542
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
3543
		return ret;
B
Ben Widawsky 已提交
3544

3545 3546
	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
3547 3548 3549 3550 3551
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);
3552
	i915_gem_object_put(obj);
3553 3554 3555 3556 3557
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

3558
/*
3559 3560 3561
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
3562
 */
C
Chris Wilson 已提交
3563
struct i915_vma *
3564 3565
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
3566
				     const struct i915_ggtt_view *view)
3567
{
C
Chris Wilson 已提交
3568
	struct i915_vma *vma;
3569
	u32 old_read_domains, old_write_domain;
3570 3571
	int ret;

3572 3573
	lockdep_assert_held(&obj->base.dev->struct_mutex);

3574 3575 3576
	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
3577
	obj->pin_display++;
3578

3579 3580 3581 3582 3583 3584 3585 3586 3587
	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
3588
	ret = i915_gem_object_set_cache_level(obj,
3589 3590
					      HAS_WT(to_i915(obj->base.dev)) ?
					      I915_CACHE_WT : I915_CACHE_NONE);
C
Chris Wilson 已提交
3591 3592
	if (ret) {
		vma = ERR_PTR(ret);
3593
		goto err_unpin_display;
C
Chris Wilson 已提交
3594
	}
3595

3596 3597
	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
3598 3599 3600 3601
	 * always use map_and_fenceable for all scanout buffers. However,
	 * it may simply be too big to fit into mappable, in which case
	 * put it anyway and hope that userspace can cope (but always first
	 * try to preserve the existing ABI).
3602
	 */
3603 3604 3605 3606 3607 3608
	vma = ERR_PTR(-ENOSPC);
	if (view->type == I915_GGTT_VIEW_NORMAL)
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
					       PIN_MAPPABLE | PIN_NONBLOCK);
	if (IS_ERR(vma))
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 0);
C
Chris Wilson 已提交
3609
	if (IS_ERR(vma))
3610
		goto err_unpin_display;
3611

3612 3613
	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

3614
	i915_gem_object_flush_cpu_write_domain(obj);
3615

3616
	old_write_domain = obj->base.write_domain;
3617
	old_read_domains = obj->base.read_domains;
3618 3619 3620 3621

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
3622
	obj->base.write_domain = 0;
3623
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3624 3625 3626

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
3627
					    old_write_domain);
3628

C
Chris Wilson 已提交
3629
	return vma;
3630 3631

err_unpin_display:
3632
	obj->pin_display--;
C
Chris Wilson 已提交
3633
	return vma;
3634 3635 3636
}

void
C
Chris Wilson 已提交
3637
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
3638
{
3639 3640
	lockdep_assert_held(&vma->vm->dev->struct_mutex);

C
Chris Wilson 已提交
3641
	if (WARN_ON(vma->obj->pin_display == 0))
3642 3643
		return;

3644 3645
	if (--vma->obj->pin_display == 0)
		vma->display_alignment = 0;
3646

3647 3648 3649 3650
	/* Bump the LRU to try and avoid premature eviction whilst flipping  */
	if (!i915_vma_is_active(vma))
		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

C
Chris Wilson 已提交
3651
	i915_vma_unpin(vma);
3652 3653
}

3654 3655
/**
 * Moves a single object to the CPU read, and possibly write domain.
3656 3657
 * @obj: object to act on
 * @write: requesting write or read-only access
3658 3659 3660 3661
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
3662
int
3663
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3664
{
C
Chris Wilson 已提交
3665
	uint32_t old_write_domain, old_read_domains;
3666 3667
	int ret;

3668
	lockdep_assert_held(&obj->base.dev->struct_mutex);
3669

3670 3671 3672 3673 3674 3675
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
3676 3677 3678
	if (ret)
		return ret;

3679 3680 3681
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

3682
	i915_gem_object_flush_gtt_write_domain(obj);
3683

3684 3685
	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;
C
Chris Wilson 已提交
3686

3687
	/* Flush the CPU cache if it's still invalid. */
3688
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3689
		i915_gem_clflush_object(obj, false);
3690

3691
		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3692 3693 3694 3695 3696
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
3697
	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3698 3699 3700 3701 3702

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
3703 3704
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3705
	}
3706

C
Chris Wilson 已提交
3707 3708 3709 3710
	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

3711 3712 3713
	return 0;
}

3714 3715 3716
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
3717 3718 3719 3720
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
3721 3722 3723
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
3724
static int
3725
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
3726
{
3727
	struct drm_i915_private *dev_priv = to_i915(dev);
3728
	struct drm_i915_file_private *file_priv = file->driver_priv;
3729
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
3730
	struct drm_i915_gem_request *request, *target = NULL;
3731
	long ret;
3732

3733 3734 3735
	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;
3736

3737
	spin_lock(&file_priv->mm.lock);
3738
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
3739 3740
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;
3741

3742 3743 3744 3745 3746 3747 3748
		/*
		 * Note that the request might not have been submitted yet.
		 * In which case emitted_jiffies will be zero.
		 */
		if (!request->emitted_jiffies)
			continue;

3749
		target = request;
3750
	}
3751
	if (target)
3752
		i915_gem_request_get(target);
3753
	spin_unlock(&file_priv->mm.lock);
3754

3755
	if (target == NULL)
3756
		return 0;
3757

3758 3759 3760
	ret = i915_wait_request(target,
				I915_WAIT_INTERRUPTIBLE,
				MAX_SCHEDULE_TIMEOUT);
3761
	i915_gem_request_put(target);
3762

3763
	return ret < 0 ? ret : 0;
3764 3765
}

3766
static bool
3767
i915_vma_misplaced(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
3768
{
3769 3770 3771
	if (!drm_mm_node_allocated(&vma->node))
		return false;

3772 3773 3774 3775
	if (vma->node.size < size)
		return true;

	if (alignment && vma->node.start & (alignment - 1))
3776 3777
		return true;

3778
	if (flags & PIN_MAPPABLE && !i915_vma_is_map_and_fenceable(vma))
3779 3780 3781 3782 3783 3784
		return true;

	if (flags & PIN_OFFSET_BIAS &&
	    vma->node.start < (flags & PIN_OFFSET_MASK))
		return true;

3785 3786 3787 3788
	if (flags & PIN_OFFSET_FIXED &&
	    vma->node.start != (flags & PIN_OFFSET_MASK))
		return true;

3789 3790 3791
	return false;
}

3792 3793 3794
void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
{
	struct drm_i915_gem_object *obj = vma->obj;
3795
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
3796 3797 3798
	bool mappable, fenceable;
	u32 fence_size, fence_alignment;

3799
	fence_size = i915_gem_get_ggtt_size(dev_priv,
3800
					    vma->size,
3801
					    i915_gem_object_get_tiling(obj));
3802
	fence_alignment = i915_gem_get_ggtt_alignment(dev_priv,
3803
						      vma->size,
3804
						      i915_gem_object_get_tiling(obj),
3805
						      true);
3806 3807 3808 3809 3810

	fenceable = (vma->node.size == fence_size &&
		     (vma->node.start & (fence_alignment - 1)) == 0);

	mappable = (vma->node.start + fence_size <=
3811
		    dev_priv->ggtt.mappable_end);
3812

3813 3814 3815 3816 3817 3818
	/*
	 * Explicitly disable for rotated VMA since the display does not
	 * need the fence and the VMA is not accessible to other users.
	 */
	if (mappable && fenceable &&
	    vma->ggtt_view.type != I915_GGTT_VIEW_ROTATED)
3819 3820 3821
		vma->flags |= I915_VMA_CAN_FENCE;
	else
		vma->flags &= ~I915_VMA_CAN_FENCE;
3822 3823
}

3824 3825
int __i915_vma_do_pin(struct i915_vma *vma,
		      u64 size, u64 alignment, u64 flags)
3826
{
3827
	unsigned int bound = vma->flags;
3828 3829
	int ret;

3830
	lockdep_assert_held(&vma->vm->dev->struct_mutex);
3831
	GEM_BUG_ON((flags & (PIN_GLOBAL | PIN_USER)) == 0);
3832
	GEM_BUG_ON((flags & PIN_GLOBAL) && !i915_vma_is_ggtt(vma));
B
Ben Widawsky 已提交
3833

3834 3835 3836 3837
	if (WARN_ON(bound & I915_VMA_PIN_OVERFLOW)) {
		ret = -EBUSY;
		goto err;
	}
3838

3839
	if ((bound & I915_VMA_BIND_MASK) == 0) {
3840 3841 3842
		ret = i915_vma_insert(vma, size, alignment, flags);
		if (ret)
			goto err;
3843
	}
3844

3845
	ret = i915_vma_bind(vma, vma->obj->cache_level, flags);
3846
	if (ret)
3847
		goto err;
3848

3849
	if ((bound ^ vma->flags) & I915_VMA_GLOBAL_BIND)
3850
		__i915_vma_set_map_and_fenceable(vma);
3851

3852
	GEM_BUG_ON(i915_vma_misplaced(vma, size, alignment, flags));
3853 3854
	return 0;

3855 3856 3857
err:
	__i915_vma_unpin(vma);
	return ret;
3858 3859
}

C
Chris Wilson 已提交
3860
struct i915_vma *
3861 3862
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
3863
			 u64 size,
3864 3865
			 u64 alignment,
			 u64 flags)
3866
{
3867 3868
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.base;
3869 3870
	struct i915_vma *vma;
	int ret;
3871

3872 3873
	lockdep_assert_held(&obj->base.dev->struct_mutex);

C
Chris Wilson 已提交
3874
	vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
3875
	if (IS_ERR(vma))
C
Chris Wilson 已提交
3876
		return vma;
3877 3878 3879 3880

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
C
Chris Wilson 已提交
3881
			return ERR_PTR(-ENOSPC);
3882

3883 3884 3885 3886 3887 3888 3889 3890 3891 3892 3893 3894 3895 3896 3897 3898 3899 3900 3901 3902 3903 3904 3905 3906 3907 3908 3909 3910 3911 3912 3913 3914 3915 3916 3917
		if (flags & PIN_MAPPABLE) {
			u32 fence_size;

			fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
							    i915_gem_object_get_tiling(obj));
			/* If the required space is larger than the available
			 * aperture, we will not able to find a slot for the
			 * object and unbinding the object now will be in
			 * vain. Worse, doing so may cause us to ping-pong
			 * the object in and out of the Global GTT and
			 * waste a lot of cycles under the mutex.
			 */
			if (fence_size > dev_priv->ggtt.mappable_end)
				return ERR_PTR(-E2BIG);

			/* If NONBLOCK is set the caller is optimistically
			 * trying to cache the full object within the mappable
			 * aperture, and *must* have a fallback in place for
			 * situations where we cannot bind the object. We
			 * can be a little more lax here and use the fallback
			 * more often to avoid costly migrations of ourselves
			 * and other objects within the aperture.
			 *
			 * Half-the-aperture is used as a simple heuristic.
			 * More interesting would to do search for a free
			 * block prior to making the commitment to unbind.
			 * That caters for the self-harm case, and with a
			 * little more heuristics (e.g. NOFAULT, NOEVICT)
			 * we could try to minimise harm to others.
			 */
			if (flags & PIN_NONBLOCK &&
			    fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

3918 3919
		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
3920 3921 3922
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
3923
		     !!(flags & PIN_MAPPABLE),
3924
		     i915_vma_is_map_and_fenceable(vma));
3925 3926
		ret = i915_vma_unbind(vma);
		if (ret)
C
Chris Wilson 已提交
3927
			return ERR_PTR(ret);
3928 3929
	}

C
Chris Wilson 已提交
3930 3931 3932
	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);
3933

C
Chris Wilson 已提交
3934
	return vma;
3935 3936
}

3937
static __always_inline unsigned int __busy_read_flag(unsigned int id)
3938 3939 3940 3941 3942 3943 3944 3945 3946 3947 3948 3949 3950 3951
{
	/* Note that we could alias engines in the execbuf API, but
	 * that would be very unwise as it prevents userspace from
	 * fine control over engine selection. Ahem.
	 *
	 * This should be something like EXEC_MAX_ENGINE instead of
	 * I915_NUM_ENGINES.
	 */
	BUILD_BUG_ON(I915_NUM_ENGINES > 16);
	return 0x10000 << id;
}

static __always_inline unsigned int __busy_write_id(unsigned int id)
{
3952 3953 3954 3955 3956 3957 3958 3959 3960
	/* The uABI guarantees an active writer is also amongst the read
	 * engines. This would be true if we accessed the activity tracking
	 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we can not guarantee that the last_write
	 * being active implies that we have set the same engine flag from
	 * last_read - hence we always set both read and write busy for
	 * last_write.
	 */
	return id | __busy_read_flag(id);
3961 3962
}

3963
static __always_inline unsigned int
3964
__busy_set_if_active(const struct dma_fence *fence,
3965 3966
		     unsigned int (*flag)(unsigned int id))
{
3967
	struct drm_i915_gem_request *rq;
3968

3969 3970 3971 3972
	/* We have to check the current hw status of the fence as the uABI
	 * guarantees forward progress. We could rely on the idle worker
	 * to eventually flush us, but to minimise latency just ask the
	 * hardware.
3973
	 *
3974
	 * Note we only report on the status of native fences.
3975
	 */
3976 3977 3978 3979 3980 3981 3982 3983 3984
	if (!dma_fence_is_i915(fence))
		return 0;

	/* opencode to_request() in order to avoid const warnings */
	rq = container_of(fence, struct drm_i915_gem_request, fence);
	if (i915_gem_request_completed(rq))
		return 0;

	return flag(rq->engine->exec_id);
3985 3986
}

3987
static __always_inline unsigned int
3988
busy_check_reader(const struct dma_fence *fence)
3989
{
3990
	return __busy_set_if_active(fence, __busy_read_flag);
3991 3992
}

3993
static __always_inline unsigned int
3994
busy_check_writer(const struct dma_fence *fence)
3995
{
3996 3997 3998 3999
	if (!fence)
		return 0;

	return __busy_set_if_active(fence, __busy_write_id);
4000 4001
}

4002 4003
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4004
		    struct drm_file *file)
4005 4006
{
	struct drm_i915_gem_busy *args = data;
4007
	struct drm_i915_gem_object *obj;
4008 4009
	struct reservation_object_list *list;
	unsigned int seq;
4010
	int err;
4011

4012
	err = -ENOENT;
4013 4014
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
4015
	if (!obj)
4016
		goto out;
4017

4018 4019 4020 4021 4022 4023 4024 4025 4026 4027 4028 4029 4030 4031 4032 4033 4034 4035
	/* A discrepancy here is that we do not report the status of
	 * non-i915 fences, i.e. even though we may report the object as idle,
	 * a call to set-domain may still stall waiting for foreign rendering.
	 * This also means that wait-ioctl may report an object as busy,
	 * where busy-ioctl considers it idle.
	 *
	 * We trade the ability to warn of foreign fences to report on which
	 * i915 engines are active for the object.
	 *
	 * Alternatively, we can trade that extra information on read/write
	 * activity with
	 *	args->busy =
	 *		!reservation_object_test_signaled_rcu(obj->resv, true);
	 * to report the overall busyness. This is what the wait-ioctl does.
	 *
	 */
retry:
	seq = raw_read_seqcount(&obj->resv->seq);
4036

4037 4038
	/* Translate the exclusive fence to the READ *and* WRITE engine */
	args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
4039

4040 4041 4042 4043
	/* Translate shared fences to READ set of engines */
	list = rcu_dereference(obj->resv->fence);
	if (list) {
		unsigned int shared_count = list->shared_count, i;
4044

4045 4046 4047 4048 4049 4050
		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence =
				rcu_dereference(list->shared[i]);

			args->busy |= busy_check_reader(fence);
		}
4051
	}
4052

4053 4054 4055 4056
	if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
		goto retry;

	err = 0;
4057 4058 4059
out:
	rcu_read_unlock();
	return err;
4060 4061 4062 4063 4064 4065
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
4066
	return i915_gem_ring_throttle(dev, file_priv);
4067 4068
}

4069 4070 4071 4072
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
4073
	struct drm_i915_private *dev_priv = to_i915(dev);
4074
	struct drm_i915_gem_madvise *args = data;
4075
	struct drm_i915_gem_object *obj;
4076
	int err;
4077 4078 4079 4080 4081 4082 4083 4084 4085

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

4086
	obj = i915_gem_object_lookup(file_priv, args->handle);
4087 4088 4089 4090 4091 4092
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;
4093

C
Chris Wilson 已提交
4094
	if (obj->mm.pages &&
4095
	    i915_gem_object_is_tiled(obj) &&
4096
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4097 4098
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
C
Chris Wilson 已提交
4099
			__i915_gem_object_unpin_pages(obj);
4100 4101 4102
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
C
Chris Wilson 已提交
4103
			__i915_gem_object_pin_pages(obj);
4104 4105
			obj->mm.quirked = true;
		}
4106 4107
	}

C
Chris Wilson 已提交
4108 4109
	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;
4110

C
Chris Wilson 已提交
4111
	/* if the object is no longer attached, discard its backing storage */
C
Chris Wilson 已提交
4112
	if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
4113 4114
		i915_gem_object_truncate(obj);

C
Chris Wilson 已提交
4115
	args->retained = obj->mm.madv != __I915_MADV_PURGED;
4116
	mutex_unlock(&obj->mm.lock);
C
Chris Wilson 已提交
4117

4118
out:
4119
	i915_gem_object_put(obj);
4120
	return err;
4121 4122
}

4123 4124
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
4125
{
4126 4127
	mutex_init(&obj->mm.lock);

4128
	INIT_LIST_HEAD(&obj->global_link);
4129
	INIT_LIST_HEAD(&obj->userfault_link);
4130
	INIT_LIST_HEAD(&obj->obj_exec_link);
B
Ben Widawsky 已提交
4131
	INIT_LIST_HEAD(&obj->vma_list);
4132
	INIT_LIST_HEAD(&obj->batch_pool_link);
4133

4134 4135
	obj->ops = ops;

4136 4137 4138
	reservation_object_init(&obj->__builtin_resv);
	obj->resv = &obj->__builtin_resv;

4139
	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
C
Chris Wilson 已提交
4140 4141 4142 4143

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);
4144

4145
	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4146 4147
}

4148
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4149 4150
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
4151 4152 4153 4154
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

4155 4156 4157 4158 4159 4160
/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
	(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))

struct drm_i915_gem_object *
i915_gem_object_create(struct drm_device *dev, u64 size)
4161
{
4162
	struct drm_i915_private *dev_priv = to_i915(dev);
4163
	struct drm_i915_gem_object *obj;
4164
	struct address_space *mapping;
D
Daniel Vetter 已提交
4165
	gfp_t mask;
4166
	int ret;
4167

4168 4169 4170 4171 4172 4173 4174 4175 4176 4177 4178
	/* There is a prevalence of the assumption that we fit the object's
	 * page count inside a 32bit _signed_ variable. Let's document this and
	 * catch if we ever need to fix it. In the meantime, if you do spot
	 * such a local variable, please consider fixing!
	 */
	if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

4179
	obj = i915_gem_object_alloc(dev);
4180
	if (obj == NULL)
4181
		return ERR_PTR(-ENOMEM);
4182

4183 4184 4185
	ret = drm_gem_object_init(dev, &obj->base, size);
	if (ret)
		goto fail;
4186

4187
	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4188
	if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) {
4189 4190 4191 4192 4193
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

4194
	mapping = obj->base.filp->f_mapping;
4195
	mapping_set_gfp_mask(mapping, mask);
4196

4197
	i915_gem_object_init(obj, &i915_gem_object_ops);
4198

4199 4200
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4201

4202 4203
	if (HAS_LLC(dev)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
4204 4205 4206 4207 4208 4209 4210 4211 4212 4213 4214 4215 4216 4217 4218
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

4219 4220
	trace_i915_gem_object_create(obj);

4221
	return obj;
4222 4223 4224 4225

fail:
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
4226 4227
}

4228 4229 4230 4231 4232 4233 4234 4235
static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

C
Chris Wilson 已提交
4236
	if (obj->mm.madv != I915_MADV_WILLNEED)
4237 4238 4239 4240 4241 4242 4243 4244 4245 4246 4247 4248 4249 4250 4251
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

4252 4253
static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
4254
{
4255
	struct drm_i915_gem_object *obj, *on;
4256

4257 4258 4259 4260 4261 4262 4263 4264 4265 4266 4267 4268 4269 4270 4271
	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);
	llist_for_each_entry(obj, freed, freed) {
		struct i915_vma *vma, *vn;

		trace_i915_gem_object_destroy(obj);

		GEM_BUG_ON(i915_gem_object_is_active(obj));
		list_for_each_entry_safe(vma, vn,
					 &obj->vma_list, obj_link) {
			GEM_BUG_ON(!i915_vma_is_ggtt(vma));
			GEM_BUG_ON(i915_vma_is_active(vma));
			vma->flags &= ~I915_VMA_PIN_MASK;
			i915_vma_close(vma);
		}
4272 4273
		GEM_BUG_ON(!list_empty(&obj->vma_list));
		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
4274

4275
		list_del(&obj->global_link);
4276 4277 4278 4279 4280 4281 4282 4283 4284 4285
	}
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	llist_for_each_entry_safe(obj, on, freed, freed) {
		GEM_BUG_ON(obj->bind_count);
		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));

		if (obj->ops->release)
			obj->ops->release(obj);
4286

4287 4288
		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
			atomic_set(&obj->mm.pages_pin_count, 0);
4289
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
4290 4291 4292 4293 4294
		GEM_BUG_ON(obj->mm.pages);

		if (obj->base.import_attach)
			drm_prime_gem_destroy(&obj->base, NULL);

4295
		reservation_object_fini(&obj->__builtin_resv);
4296 4297 4298 4299 4300 4301 4302 4303 4304 4305 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(i915, obj->base.size);

		kfree(obj->bit_17);
		i915_gem_object_free(obj);
	}
}

static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed;

	freed = llist_del_all(&i915->mm.free_list);
	if (unlikely(freed))
		__i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);
	struct llist_node *freed;
4318

4319 4320 4321 4322 4323 4324 4325
	/* All file-owned VMA should have been released by this point through
	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * unbound now.
	 */
4326

4327 4328 4329
	while ((freed = llist_del_all(&i915->mm.free_list)))
		__i915_gem_free_objects(i915, freed);
}
4330

4331 4332 4333 4334 4335 4336 4337 4338 4339 4340 4341 4342 4343 4344
static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* We can't simply use call_rcu() from i915_gem_free_object()
	 * as we need to block whilst unbinding, and the call_rcu
	 * task may be called from softirq context. So we take a
	 * detour through a worker.
	 */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		schedule_work(&i915->mm.free_work);
}
4345

4346 4347 4348
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
C
Chris Wilson 已提交
4349

4350 4351 4352
	if (obj->mm.quirked)
		__i915_gem_object_unpin_pages(obj);

4353
	if (discard_backing_storage(obj))
C
Chris Wilson 已提交
4354
		obj->mm.madv = I915_MADV_DONTNEED;
4355

4356 4357 4358 4359 4360 4361
	/* Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
4362 4363
}

4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
	if (i915_gem_object_is_active(obj))
		i915_gem_object_set_active_reference(obj);
	else
		i915_gem_object_put(obj);
}

4375 4376 4377 4378 4379 4380 4381 4382 4383
static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
}

4384
int i915_gem_suspend(struct drm_device *dev)
4385
{
4386
	struct drm_i915_private *dev_priv = to_i915(dev);
4387
	int ret;
4388

4389 4390
	intel_suspend_gt_powersave(dev_priv);

4391
	mutex_lock(&dev->struct_mutex);
4392 4393 4394 4395 4396 4397 4398 4399 4400 4401 4402 4403 4404

	/* We have to flush all the executing contexts to main memory so
	 * that they can saved in the hibernation image. To ensure the last
	 * context image is coherent, we have to switch away from it. That
	 * leaves the dev_priv->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		goto err;

4405 4406 4407
	ret = i915_gem_wait_for_idle(dev_priv,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
4408
	if (ret)
4409
		goto err;
4410

4411
	i915_gem_retire_requests(dev_priv);
4412
	GEM_BUG_ON(dev_priv->gt.active_requests);
4413

4414
	assert_kernel_context_is_current(dev_priv);
4415
	i915_gem_context_lost(dev_priv);
4416 4417
	mutex_unlock(&dev->struct_mutex);

4418
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4419 4420
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
	flush_delayed_work(&dev_priv->gt.idle_work);
4421
	flush_work(&dev_priv->mm.free_work);
4422

4423 4424 4425
	/* Assert that we sucessfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
4426
	WARN_ON(dev_priv->gt.awake);
4427

4428 4429 4430 4431 4432 4433 4434 4435 4436 4437 4438 4439 4440 4441 4442 4443 4444 4445 4446 4447 4448 4449 4450 4451
	/*
	 * Neither the BIOS, ourselves or any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that is of the upmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */
	if (HAS_HW_CONTEXTS(dev)) {
		int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
		WARN_ON(reset && reset != -ENODEV);
	}

4452
	return 0;
4453 4454 4455 4456

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
4457 4458
}

4459 4460 4461 4462 4463 4464 4465 4466 4467 4468 4469
void i915_gem_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev);

	/* As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
4470
	dev_priv->gt.resume(dev_priv);
4471 4472 4473 4474

	mutex_unlock(&dev->struct_mutex);
}

4475 4476
void i915_gem_init_swizzling(struct drm_device *dev)
{
4477
	struct drm_i915_private *dev_priv = to_i915(dev);
4478

4479
	if (INTEL_INFO(dev)->gen < 5 ||
4480 4481 4482 4483 4484 4485
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

4486
	if (IS_GEN5(dev_priv))
4487 4488
		return;

4489
	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4490
	if (IS_GEN6(dev_priv))
4491
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4492
	else if (IS_GEN7(dev_priv))
4493
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4494
	else if (IS_GEN8(dev_priv))
B
Ben Widawsky 已提交
4495
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4496 4497
	else
		BUG();
4498
}
D
Daniel Vetter 已提交
4499

4500
static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
4501 4502 4503 4504 4505 4506 4507
{
	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

4508
static void init_unused_rings(struct drm_i915_private *dev_priv)
4509
{
4510 4511 4512 4513 4514 4515 4516 4517 4518 4519 4520 4521
	if (IS_I830(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
		init_unused_ring(dev_priv, SRB2_BASE);
		init_unused_ring(dev_priv, SRB3_BASE);
	} else if (IS_GEN2(dev_priv)) {
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
	} else if (IS_GEN3(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, PRB2_BASE);
4522 4523 4524
	}
}

4525 4526 4527
int
i915_gem_init_hw(struct drm_device *dev)
{
4528
	struct drm_i915_private *dev_priv = to_i915(dev);
4529
	struct intel_engine_cs *engine;
4530
	enum intel_engine_id id;
C
Chris Wilson 已提交
4531
	int ret;
4532

4533 4534
	dev_priv->gt.last_init_time = ktime_get();

4535 4536 4537
	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

4538
	if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
4539
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4540

4541
	if (IS_HASWELL(dev_priv))
4542
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
4543
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4544

4545
	if (HAS_PCH_NOP(dev_priv)) {
4546
		if (IS_IVYBRIDGE(dev_priv)) {
4547 4548 4549 4550 4551 4552 4553 4554
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_INFO(dev)->gen >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
4555 4556
	}

4557 4558
	i915_gem_init_swizzling(dev);

4559 4560 4561 4562 4563 4564
	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Makes sure all unused rings
	 * are totally idle.
	 */
4565
	init_unused_rings(dev_priv);
4566

4567
	BUG_ON(!dev_priv->kernel_context);
4568

4569 4570 4571 4572 4573 4574 4575
	ret = i915_ppgtt_init_hw(dev);
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* Need to do basic initialisation of all rings first: */
4576
	for_each_engine(engine, dev_priv, id) {
4577
		ret = engine->init_hw(engine);
D
Daniel Vetter 已提交
4578
		if (ret)
4579
			goto out;
D
Daniel Vetter 已提交
4580
	}
4581

4582 4583
	intel_mocs_init_l3cc_table(dev);

4584
	/* We can't enable contexts until all firmware is loaded */
4585 4586 4587
	ret = intel_guc_setup(dev);
	if (ret)
		goto out;
4588

4589 4590
out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4591
	return ret;
4592 4593
}

4594 4595 4596 4597 4598 4599 4600 4601 4602 4603 4604 4605 4606 4607 4608 4609 4610 4611 4612 4613 4614
bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
{
	if (INTEL_INFO(dev_priv)->gen < 6)
		return false;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	if (value >= 0)
		return value;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

4615 4616
int i915_gem_init(struct drm_device *dev)
{
4617
	struct drm_i915_private *dev_priv = to_i915(dev);
4618 4619 4620
	int ret;

	mutex_lock(&dev->struct_mutex);
4621

4622
	if (!i915.enable_execlists) {
4623
		dev_priv->gt.resume = intel_legacy_submission_resume;
4624
		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4625
	} else {
4626
		dev_priv->gt.resume = intel_lr_context_resume;
4627
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4628 4629
	}

4630 4631 4632 4633 4634 4635 4636 4637
	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

4638
	i915_gem_init_userptr(dev_priv);
4639 4640 4641 4642

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret)
		goto out_unlock;
4643

4644
	ret = i915_gem_context_init(dev);
4645 4646
	if (ret)
		goto out_unlock;
4647

4648
	ret = intel_engines_init(dev);
D
Daniel Vetter 已提交
4649
	if (ret)
4650
		goto out_unlock;
4651

4652
	ret = i915_gem_init_hw(dev);
4653
	if (ret == -EIO) {
4654
		/* Allow engine initialisation to fail by marking the GPU as
4655 4656 4657 4658
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4659
		i915_gem_set_wedged(dev_priv);
4660
		ret = 0;
4661
	}
4662 4663

out_unlock:
4664
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4665
	mutex_unlock(&dev->struct_mutex);
4666

4667
	return ret;
4668 4669
}

4670
void
4671
i915_gem_cleanup_engines(struct drm_device *dev)
4672
{
4673
	struct drm_i915_private *dev_priv = to_i915(dev);
4674
	struct intel_engine_cs *engine;
4675
	enum intel_engine_id id;
4676

4677
	for_each_engine(engine, dev_priv, id)
4678
		dev_priv->gt.cleanup_engine(engine);
4679 4680
}

4681 4682 4683
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
4684
	struct drm_device *dev = &dev_priv->drm;
4685
	int i;
4686 4687 4688 4689 4690 4691 4692 4693 4694 4695

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
		 IS_I945GM(dev_priv) || IS_G33(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

4696
	if (intel_vgpu_active(dev_priv))
4697 4698 4699 4700
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
4701 4702 4703 4704 4705 4706 4707
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

		fence->i915 = dev_priv;
		fence->id = i;
		list_add_tail(&fence->link, &dev_priv->mm.fence_list);
	}
4708 4709 4710 4711 4712
	i915_gem_restore_fences(dev);

	i915_gem_detect_bit_6_swizzle(dev);
}

4713
int
4714
i915_gem_load_init(struct drm_device *dev)
4715
{
4716
	struct drm_i915_private *dev_priv = to_i915(dev);
4717
	int err;
4718

4719
	dev_priv->objects =
4720 4721 4722 4723
		kmem_cache_create("i915_gem_object",
				  sizeof(struct drm_i915_gem_object), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
4724 4725 4726 4727 4728
	if (!dev_priv->objects) {
		err = -ENOMEM;
		goto err_out;
	}

4729 4730 4731 4732 4733
	dev_priv->vmas =
		kmem_cache_create("i915_gem_vma",
				  sizeof(struct i915_vma), 0,
				  SLAB_HWCACHE_ALIGN,
				  NULL);
4734 4735 4736 4737 4738
	if (!dev_priv->vmas) {
		err = -ENOMEM;
		goto err_objects;
	}

4739 4740 4741
	dev_priv->requests =
		kmem_cache_create("i915_gem_request",
				  sizeof(struct drm_i915_gem_request), 0,
4742 4743 4744
				  SLAB_HWCACHE_ALIGN |
				  SLAB_RECLAIM_ACCOUNT |
				  SLAB_DESTROY_BY_RCU,
4745
				  NULL);
4746 4747 4748 4749 4750 4751 4752 4753 4754 4755 4756 4757 4758
	if (!dev_priv->requests) {
		err = -ENOMEM;
		goto err_vmas;
	}

	mutex_lock(&dev_priv->drm.struct_mutex);
	INIT_LIST_HEAD(&dev_priv->gt.timelines);
	err = i915_gem_timeline_init(dev_priv,
				     &dev_priv->gt.global_timeline,
				     "[execution]");
	mutex_unlock(&dev_priv->drm.struct_mutex);
	if (err)
		goto err_requests;
4759

4760
	INIT_LIST_HEAD(&dev_priv->context_list);
4761 4762
	INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
	init_llist_head(&dev_priv->mm.free_list);
C
Chris Wilson 已提交
4763 4764
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4765
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4766
	INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
4767
	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
4768
			  i915_gem_retire_work_handler);
4769
	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
4770
			  i915_gem_idle_work_handler);
4771
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4772
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4773

4774 4775
	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

4776
	init_waitqueue_head(&dev_priv->pending_flip_queue);
4777

4778 4779
	dev_priv->mm.interruptible = true;

4780 4781
	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

4782
	spin_lock_init(&dev_priv->fb_tracking.lock);
4783 4784 4785 4786 4787 4788 4789 4790 4791 4792 4793

	return 0;

err_requests:
	kmem_cache_destroy(dev_priv->requests);
err_vmas:
	kmem_cache_destroy(dev_priv->vmas);
err_objects:
	kmem_cache_destroy(dev_priv->objects);
err_out:
	return err;
4794
}
4795

4796 4797 4798 4799
void i915_gem_load_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

4800 4801
	WARN_ON(!llist_empty(&dev_priv->mm.free_list));

4802 4803 4804
	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
4805 4806 4807

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();
4808 4809
}

4810 4811 4812 4813 4814 4815 4816 4817 4818 4819 4820 4821 4822
int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_shrink_all(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

4823 4824 4825
int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
4826 4827 4828 4829 4830
	struct list_head *phases[] = {
		&dev_priv->mm.unbound_list,
		&dev_priv->mm.bound_list,
		NULL
	}, **p;
4831 4832 4833 4834 4835 4836 4837 4838 4839 4840

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
4841 4842 4843
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well.
4844 4845
	 */

4846 4847
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
4848

4849
	for (p = phases; *p; p++) {
4850
		list_for_each_entry(obj, *p, global_link) {
4851 4852 4853
			obj->base.read_domains = I915_GEM_DOMAIN_CPU;
			obj->base.write_domain = I915_GEM_DOMAIN_CPU;
		}
4854
	}
4855
	mutex_unlock(&dev_priv->drm.struct_mutex);
4856 4857 4858 4859

	return 0;
}

4860
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4861
{
4862
	struct drm_i915_file_private *file_priv = file->driver_priv;
4863
	struct drm_i915_gem_request *request;
4864 4865 4866 4867 4868

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
4869
	spin_lock(&file_priv->mm.lock);
4870
	list_for_each_entry(request, &file_priv->mm.request_list, client_list)
4871
		request->file_priv = NULL;
4872
	spin_unlock(&file_priv->mm.lock);
4873

4874
	if (!list_empty(&file_priv->rps.link)) {
4875
		spin_lock(&to_i915(dev)->rps.client_lock);
4876
		list_del(&file_priv->rps.link);
4877
		spin_unlock(&to_i915(dev)->rps.client_lock);
4878
	}
4879 4880 4881 4882 4883
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
4884
	int ret;
4885 4886 4887 4888 4889 4890 4891 4892

	DRM_DEBUG_DRIVER("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
4893
	file_priv->dev_priv = to_i915(dev);
4894
	file_priv->file = file;
4895
	INIT_LIST_HEAD(&file_priv->rps.link);
4896 4897 4898 4899

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

4900
	file_priv->bsd_engine = -1;
4901

4902 4903 4904
	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);
4905

4906
	return ret;
4907 4908
}

4909 4910
/**
 * i915_gem_track_fb - update frontbuffer tracking
4911 4912 4913
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
4914 4915 4916 4917
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
4918 4919 4920 4921
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
4922 4923 4924 4925 4926 4927 4928 4929 4930
	/* Control of individual bits within the mask are guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     sizeof(atomic_t) * BITS_PER_BYTE);

4931
	if (old) {
4932 4933
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
4934 4935 4936
	}

	if (new) {
4937 4938
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
4939 4940 4941
	}
}

4942 4943 4944 4945 4946 4947 4948 4949 4950 4951
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
			         const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

4952
	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
4953
	if (IS_ERR(obj))
4954 4955 4956 4957 4958 4959
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

C
Chris Wilson 已提交
4960
	ret = i915_gem_object_pin_pages(obj);
4961 4962 4963
	if (ret)
		goto fail;

C
Chris Wilson 已提交
4964
	sg = obj->mm.pages;
4965
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
C
Chris Wilson 已提交
4966
	obj->mm.dirty = true; /* Backing store is now out of date */
4967 4968 4969 4970 4971 4972 4973 4974 4975 4976 4977
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
4978
	i915_gem_object_put(obj);
4979 4980
	return ERR_PTR(ret);
}
4981 4982 4983 4984 4985 4986

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
C
Chris Wilson 已提交
4987
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
4988 4989 4990 4991 4992
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
C
Chris Wilson 已提交
4993
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
4994 4995 4996 4997 4998 4999 5000 5001 5002 5003 5004 5005 5006 5007 5008 5009 5010 5011 5012 5013 5014 5015 5016 5017 5018 5019 5020 5021 5022 5023 5024 5025 5026 5027 5028 5029 5030 5031 5032 5033 5034 5035 5036 5037 5038 5039 5040 5041 5042 5043 5044 5045 5046 5047 5048 5049 5050 5051 5052 5053 5054 5055 5056 5057 5058 5059 5060 5061 5062 5063 5064 5065 5066 5067 5068 5069 5070 5071 5072 5073 5074 5075 5076 5077 5078 5079 5080 5081 5082 5083 5084 5085 5086 5087 5088 5089 5090 5091 5092 5093 5094 5095 5096 5097 5098 5099 5100 5101 5102 5103 5104 5105 5106 5107 5108 5109 5110 5111 5112 5113 5114 5115 5116 5117

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		unsigned long exception, i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		exception =
			RADIX_TREE_EXCEPTIONAL_ENTRY |
			idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i,
						(void *)exception);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radixtree will contain an exceptional entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(radix_tree_exception(sg))) {
		unsigned long base =
			(unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
C
Chris Wilson 已提交
5118
	if (!obj->mm.dirty)
5119 5120 5121 5122 5123 5124 5125 5126 5127 5128 5129 5130 5131 5132 5133
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}