/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/dma-fence-array.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);

static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
}

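/*
 * Writes still tracked in the CPU write domain are flushed when the object
 * changes domain; otherwise a clflush is needed whenever the object is not
 * kept coherent by the hardware (no LLC and an uncached cache level) or is
 * pinned for scanout.
 */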
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

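/*
 * Reserve a small node in the mappable range of the global GTT; the
 * pread/pwrite slow paths use this to map individual pages through the
 * aperture when the whole object cannot be pinned there.
 */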
static int
insert_mappable_node(struct i915_ggtt *ggtt,
                     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
						   size, 0,
						   I915_COLOR_UNEVICTABLE,
						   0, ggtt->mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

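/*
 * Wait for a pending GPU reset to complete, but give up after
 * I915_RESET_TIMEOUT so a stuck reset cannot block userspace forever.
 */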
static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	might_sleep();

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       I915_RESET_TIMEOUT);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

175
static struct sg_table *
176
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
177
{
178
	struct address_space *mapping = obj->base.filp->f_mapping;
179
	drm_dma_handle_t *phys;
180 181
	struct sg_table *st;
	struct scatterlist *sg;
182
	char *vaddr;
183
	int i;
184

185
	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
186
		return ERR_PTR(-EINVAL);
187

	/* Always aligning to the object size, allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	phys = drm_pci_alloc(obj->base.dev,
			     obj->base.size,
			     roundup_pow_of_two(obj->base.size));
	if (!phys)
		return ERR_PTR(-ENOMEM);

	vaddr = phys->vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
204 205 206 207
		if (IS_ERR(page)) {
			st = ERR_CAST(page);
			goto err_phys;
		}

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

214
		put_page(page);
215 216 217
		vaddr += PAGE_SIZE;
	}

218
	i915_gem_chipset_flush(to_i915(obj->base.dev));
219 220

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st) {
		st = ERR_PTR(-ENOMEM);
		goto err_phys;
	}
225 226 227

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
228 229
		st = ERR_PTR(-ENOMEM);
		goto err_phys;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;
235

236
	sg_dma_address(sg) = phys->busaddr;
237 238
	sg_dma_len(sg) = obj->base.size;

	obj->phys_handle = phys;
	return st;

err_phys:
	drm_pci_free(obj->base.dev, phys);
244
	return st;
245 246 247
}

static void
248
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
249 250
				struct sg_table *pages,
				bool needs_clflush)
251
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
253

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;
256

257 258
	if (needs_clflush &&
	    (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
259
	    !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
260
		drm_clflush_sg(pages);

	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
270
	__i915_gem_object_release_shmem(obj, pages, false);
271

	if (obj->mm.dirty) {
273
		struct address_space *mapping = obj->base.filp->f_mapping;
274
		char *vaddr = obj->phys_handle->vaddr;
275 276 277
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
292
				mark_page_accessed(page);
293
			put_page(page);
294 295
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
297 298
	}

299 300
	sg_free_table(pages);
	kfree(pages);
301 302

	drm_pci_free(obj->base.dev, obj->phys_handle);
303 304 305 306 307
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

317
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
318 319 320
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
321 322 323
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
324

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
329
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

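/*
 * Wait on a single fence, optionally applying an RPS waitboost for i915
 * requests so an interactive client is not left waiting at low GPU clocks.
 * Returns the remaining timeout in jiffies or a negative error code.
 */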
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout,
			   struct intel_rps_client *rps)
359
{
360
	struct drm_i915_gem_request *rq;
361

362
	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
363

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
	if (i915_gem_request_completed(rq))
		goto out;

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (rps) {
		if (INTEL_GEN(rq->i915) >= 6)
			gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
		else
			rps = NULL;
396 397
	}

	timeout = i915_wait_request(rq, flags, timeout);

out:
	if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
		i915_gem_request_retire_upto(rq);

404
	if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
		/* The GPU is now idle and this client has stalled.
		 * Since no other client has submitted a request in the
		 * meantime, assume that this client is the only one
		 * supplying work to the GPU but is unable to keep that
		 * work supplied because it is waiting. Since the GPU is
		 * then never kept fully busy, RPS autoclocking will
		 * keep the clocks relatively low, causing further delays.
		 * Compensate by giving the synchronous client credit for
		 * a waitboost next time.
		 */
		spin_lock(&rq->i915->rps.client_lock);
		list_del_init(&rps->link);
		spin_unlock(&rq->i915->rps.client_lock);
	}

	return timeout;
}

static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout,
				 struct intel_rps_client *rps)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
434 435
		int ret;

436 437
		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
438 439 440
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout,
							     rps);
			if (timeout <= 0)
				break;
447

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(resv);
456 457
	}

	if (excl && timeout > 0)
		timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);

	dma_fence_put(excl);

	return timeout;
464 465
}

static void __fence_set_priority(struct dma_fence *fence, int prio)
{
	struct drm_i915_gem_request *rq;
	struct intel_engine_cs *engine;

	if (!dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;
	if (!engine->schedule)
		return;

	engine->schedule(rq, prio);
}

static void fence_set_priority(struct dma_fence *fence, int prio)
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			__fence_set_priority(array->fences[i], prio);
	} else {
		__fence_set_priority(fence, prio);
	}
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      int prio)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			fence_set_priority(shared[i], prio);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		fence_set_priority(excl, prio);
		dma_fence_put(excl);
	}
	return 0;
}

/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps: client (user process) to charge for any waitboosting
536
 */
537 538 539 540 541
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout,
		     struct intel_rps_client *rps)
542
{
	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);
550

551 552 553
	timeout = i915_gem_object_wait_reservation(obj->resv,
						   flags, timeout,
						   rps);
554
	return timeout < 0 ? timeout : 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}

564 565 566 567
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
568
	int ret;
569

570 571
	if (align > obj->base.size)
		return -EINVAL;
572

573
	if (obj->ops == &i915_gem_phys_ops)
574 575
		return 0;

	if (obj->mm.madv != I915_MADV_WILLNEED)
577 578 579 580 581
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = i915_gem_object_unbind(obj);
	if (ret)
		return ret;

586
	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
587 588
	if (obj->mm.pages)
		return -EBUSY;
589 590 591

	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_pin_pages(obj);
593 594 595 596 597
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
598
		     struct drm_file *file)
599 600
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
601
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
602 603 604 605

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
606
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
607 608
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;
609

610
	drm_clflush_virt_range(vaddr, args->size);
611
	i915_gem_chipset_flush(to_i915(obj->base.dev));
612

613
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
614
	return 0;
615 616
}

617
void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
618
{
619
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
620 621 622 623
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
624
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
625
	kmem_cache_free(dev_priv->objects, obj);
626 627
}

628 629
static int
i915_gem_create(struct drm_file *file,
630
		struct drm_i915_private *dev_priv,
631 632
		uint64_t size,
		uint32_t *handle_p)
633
{
634
	struct drm_i915_gem_object *obj;
635 636
	int ret;
	u32 handle;
637

638
	size = roundup(size, PAGE_SIZE);
639 640
	if (size == 0)
		return -EINVAL;
641 642

	/* Allocate the new object */
643
	obj = i915_gem_object_create(dev_priv, size);
644 645
	if (IS_ERR(obj))
		return PTR_ERR(obj);
646

647
	ret = drm_gem_handle_create(file, &obj->base, &handle);
648
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
650 651
	if (ret)
		return ret;
652

653
	*handle_p = handle;
654 655 656
	return 0;
}

657 658 659 660 661 662
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
663
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
664
	args->size = args->pitch * args->height;
665
	return i915_gem_create(file, to_i915(dev),
666
			       args->size, &args->handle);
667 668 669 670
}

/**
 * Creates a new mm object and returns a handle to it.
671 672 673
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
674 675 676 677 678
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
679
	struct drm_i915_private *dev_priv = to_i915(dev);
680
	struct drm_i915_gem_create *args = data;
681

682
	i915_gem_flush_free_objects(dev_priv);
683

684
	return i915_gem_create(file, dev_priv,
685
			       args->size, &args->handle);
686 687
}

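/*
 * Copy out of a bit-17-swizzled page one 64-byte cacheline at a time; the
 * caller has already established that this page is swizzled, so bit 6 of
 * the GPU offset is flipped to undo the hardware's cacheline swap.
 */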
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

714
static inline int
715 716
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
746
				    unsigned int *needs_clflush)
747 748 749
{
	int ret;

750
	lockdep_assert_held(&obj->base.dev->struct_mutex);
751

752
	*needs_clflush = 0;
753 754
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;
755

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
761 762 763
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
765 766 767
	if (ret)
		return ret;

768 769
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
776 777
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
778 779 780

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
781 782 783
		if (ret)
			goto err_unpin;

784
		*needs_clflush = 0;
785 786
	}

787
	/* return with the pages pinned */
788
	return 0;
789 790 791 792

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

800 801
	lockdep_assert_held(&obj->base.dev->struct_mutex);

802 803 804 805
	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
812 813 814
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
816 817 818
	if (ret)
		return ret;

819 820
	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;

	/* Same trick applies to invalidate partially written cachelines read
	 * before writing.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
							 obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
838 839 840
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	if ((*needs_clflush & CLFLUSH_AFTER) == 0)
		obj->cache_dirty = true;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->mm.dirty = true;
849
	/* return with the pages pinned */
850
	return 0;
851 852 853 854

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
855 856
}

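/* Clflush a range, widening it to whole 128-byte pairs when swizzling is in use. */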
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
861
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

879 880 881
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
882
shmem_pread_slow(struct page *page, int offset, int length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
891
		shmem_clflush_swizzled_range(vaddr + offset, length,
892
					     page_do_bit17_swizzling);
893 894

	if (page_do_bit17_swizzling)
895
		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
896
	else
897
		ret = __copy_to_user(user_data, vaddr + offset, length);
898 899
	kunmap(page);

900
	return ret ? - EFAULT : 0;
901 902
}

static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
	    bool page_do_bit17_swizzling, bool needs_clflush)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush)
			drm_clflush_virt_range(vaddr + offset, length);
		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return 0;

	return shmem_pread_slow(page, offset, length, user_data,
				page_do_bit17_swizzling, needs_clflush);
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	char __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	mutex_unlock(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pread(page, offset, length, user_data,
				  page_to_phys(page) & obj_do_bit17_swizzling,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
979 980
{
	void *vaddr;
981
	unsigned long unwritten;
982 983

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = (void __force *)
			io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data, vaddr + offset, length);
		io_mapping_unmap(vaddr);
	}
993 994 995 996
	return unwritten;
}

static int
997 998
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
999
{
1000 1001
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
1002
	struct drm_mm_node node;
1003 1004 1005
	struct i915_vma *vma;
	void __user *user_data;
	u64 remain, offset;
1006 1007
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
1015 1016 1017
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
1018
		ret = i915_vma_put_fence(vma);
1019 1020 1021 1022 1023
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
1025
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1026
		if (ret)
1027 1028
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

1035
	mutex_unlock(&i915->drm.struct_mutex);
1036

1037 1038 1039
	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1056
					       node.start, I915_CACHE_NONE, 0);
1057 1058 1059 1060
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}
1061 1062 1063

		if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

1073
	mutex_lock(&i915->drm.struct_mutex);
1074 1075 1076 1077
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
1078
				       node.start, node.size);
1079 1080
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
1082
	}
1083 1084 1085
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
1086

1087 1088 1089
	return ret;
}

1090 1091
/**
 * Reads data from the object referenced by handle.
1092 1093 1094
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1100
		     struct drm_file *file)
1101 1102
{
	struct drm_i915_gem_pread *args = data;
1103
	struct drm_i915_gem_object *obj;
1104
	int ret;
1105

1106 1107 1108 1109
	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
1110
		       u64_to_user_ptr(args->data_ptr),
1111 1112 1113
		       args->size))
		return -EFAULT;

1114
	obj = i915_gem_object_lookup(file, args->handle);
1115 1116
	if (!obj)
		return -ENOENT;
1117

1118
	/* Bounds check source.  */
1119
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
1121
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1130
	if (ret)
1131
		goto out;
1132

1133
	ret = i915_gem_object_pin_pages(obj);
1134
	if (ret)
1135
		goto out;
1136

1137
	ret = i915_gem_shmem_pread(obj, args);
1138
	if (ret == -EFAULT || ret == -ENODEV)
1139
		ret = i915_gem_gtt_pread(obj, args);
1140

1141 1142
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
1144
	return ret;
1145 1146
}

1147 1148
/* This is the fast write path which cannot handle
 * page faults in the source data
1149
 */
1150

1151 1152 1153 1154
static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
1155
{
1156
	void *vaddr;
1157
	unsigned long unwritten;
1158

1159
	/* We can use the cpu mem copy function because this is X86. */
1160 1161
	vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
1162
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = (void __force *)
			io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user(vaddr + offset, user_data, length);
		io_mapping_unmap(vaddr);
	}
1170 1171 1172 1173

	return unwritten;
}

1174 1175 1176
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
1177
 * @obj: i915 GEM object
1178
 * @args: pwrite arguments structure
1179
 */
1180
static int
1181 1182
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
1183
{
1184
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
1185 1186
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
1187 1188 1189
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
1190
	int ret;
1191

1192 1193 1194
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

1196
	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1198
				       PIN_MAPPABLE | PIN_NONBLOCK);
1199 1200 1201
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
1202
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
1209
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1210
		if (ret)
1211 1212
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
1213
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

1219 1220
	mutex_unlock(&i915->drm.struct_mutex);

1221
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1222

1223 1224 1225 1226
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
1227 1228
		/* Operation in this page
		 *
1229 1230 1231
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
1232
		 */
1233
		u32 page_base = node.start;
1234 1235
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
1246
		/* If we get a fault while copying data, then (presumably) our
1247 1248
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
1249 1250
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
1251
		 */
1252 1253 1254 1255
		if (ggtt_write(&ggtt->mappable, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}
1257

1258 1259 1260
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
1261
	}
1262
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1263 1264

	mutex_lock(&i915->drm.struct_mutex);
out_unpin:
1266 1267 1268
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
1269
				       node.start, node.size);
1270 1271
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
1273
	}
1274
out_unlock:
1275
	intel_runtime_pm_put(i915);
1276
	mutex_unlock(&i915->drm.struct_mutex);
1277
	return ret;
1278 1279
}

1280
static int
1281
shmem_pwrite_slow(struct page *page, int offset, int length,
1282 1283 1284 1285
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
1286
{
1287 1288
	char *vaddr;
	int ret;
1289

1290
	vaddr = kmap(page);
1291
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1292
		shmem_clflush_swizzled_range(vaddr + offset, length,
1293
					     page_do_bit17_swizzling);
1294
	if (page_do_bit17_swizzling)
1295 1296
		ret = __copy_from_user_swizzled(vaddr, offset, user_data,
						length);
1297
	else
1298
		ret = __copy_from_user(vaddr + offset, user_data, length);
1299
	if (needs_clflush_after)
1300
		shmem_clflush_swizzled_range(vaddr + offset, length,
1301
					     page_do_bit17_swizzling);
1302
	kunmap(page);
1303

1304
	return ret ? -EFAULT : 0;
1305 1306
}

1307 1308 1309 1310 1311
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
1312
static int
1313 1314 1315 1316
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool page_do_bit17_swizzling,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
1317
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush_before)
			drm_clflush_virt_range(vaddr + offset, len);
		ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
		if (needs_clflush_after)
			drm_clflush_virt_range(vaddr + offset, len);

		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return ret;

	return shmem_pwrite_slow(page, offset, len, user_data,
				 page_do_bit17_swizzling,
				 needs_clflush_before,
				 needs_clflush_after);
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	void __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int partial_cacheline_write;
1350
	unsigned int needs_clflush;
1351 1352
	unsigned int offset, idx;
	int ret;
1353

1354
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1355 1356 1357
	if (ret)
		return ret;

1358 1359 1360 1361
	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	mutex_unlock(&i915->drm.struct_mutex);
	if (ret)
		return ret;
1362

1363 1364 1365
	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);
1366

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1374

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;
1381

1382 1383 1384
		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;
1385

1386 1387 1388 1389
		ret = shmem_pwrite(page, offset, length, user_data,
				   page_to_phys(page) & obj_do_bit17_swizzling,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
1390
		if (ret)
1391
			break;
1392

1393 1394 1395
		remain -= length;
		user_data += length;
		offset = 0;
1396
	}
1397

1398
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1399
	i915_gem_obj_finish_shmem_access(obj);
1400
	return ret;
1401 1402 1403 1404
}

/**
 * Writes data to the object referenced by handle.
1405 1406 1407
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1408 1409 1410 1411 1412
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1413
		      struct drm_file *file)
1414 1415
{
	struct drm_i915_gem_pwrite *args = data;
1416
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
1423
		       u64_to_user_ptr(args->data_ptr),
1424 1425 1426
		       args->size))
		return -EFAULT;

1427
	obj = i915_gem_object_lookup(file, args->handle);
1428 1429
	if (!obj)
		return -ENOENT;
1430

1431
	/* Bounds check destination. */
1432
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
1434
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

1439 1440 1441 1442 1443
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1444 1445 1446
	if (ret)
		goto err;

1447
	ret = i915_gem_object_pin_pages(obj);
1448
	if (ret)
1449
		goto err;
1450

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
1458
	if (!i915_gem_object_has_struct_page(obj) ||
1459
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
1462 1463
		 * textures). Fallback to the shmem path in that case.
		 */
1464
		ret = i915_gem_gtt_pwrite_fast(obj, args);
1465

1466
	if (ret == -EFAULT || ret == -ENOSPC) {
1467 1468
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
1469
		else
1470
			ret = i915_gem_shmem_pwrite(obj, args);
1471
	}
1472

1473
	i915_gem_object_unpin_pages(obj);
1474
err:
	i915_gem_object_put(obj);
1476
	return ret;
1477 1478
}

1479
static inline enum fb_op_origin
1480 1481
write_origin(struct drm_i915_gem_object *obj, unsigned domain)
{
1482 1483
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
1484 1485
}

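/*
 * Mark the object's idle GGTT VMAs as recently used: move them to the tail
 * of the inactive LRU (and the object to the tail of its global list) after
 * a CPU access so they are not the first candidates for eviction.
 */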
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915;
	struct list_head *list;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!i915_vma_is_ggtt(vma))
1494
			break;

		if (i915_vma_is_active(vma))
			continue;

		if (!drm_mm_node_allocated(&vma->node))
			continue;

		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}

	i915 = to_i915(obj->base.dev);
	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1507
	list_move_tail(&obj->global_link, list);
1508 1509
}

1510
/**
1511 1512
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
1513 1514 1515
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1516 1517 1518
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1519
			  struct drm_file *file)
1520 1521
{
	struct drm_i915_gem_set_domain *args = data;
1522
	struct drm_i915_gem_object *obj;
1523 1524
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
1525
	int err;
1526

1527
	/* Only handle setting domains to types used by the CPU. */
1528
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

1537
	obj = i915_gem_object_lookup(file, args->handle);
1538 1539
	if (!obj)
		return -ENOENT;
1540

1541 1542 1543 1544
	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
1545
	err = i915_gem_object_wait(obj,
1546 1547 1548 1549
				   I915_WAIT_INTERRUPTIBLE |
				   (write_domain ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1550
	if (err)
		goto out;
1552

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;
1564 1565 1566

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out_unpin;
1568

1569
	if (read_domains & I915_GEM_DOMAIN_GTT)
1570
		err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1571
	else
1572
		err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1573

1574 1575
	/* And bump the LRU for this access */
	i915_gem_object_bump_inactive_ggtt(obj);
1576

1577
	mutex_unlock(&dev->struct_mutex);
1578

1579 1580 1581
	if (write_domain != 0)
		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));

out_unpin:
1583
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
1586
	return err;
1587 1588 1589 1590
}

/**
 * Called when user space has done writes to this buffer
1591 1592 1593
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1594 1595 1596
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1597
			 struct drm_file *file)
1598 1599
{
	struct drm_i915_gem_sw_finish *args = data;
1600
	struct drm_i915_gem_object *obj;
1601
	int err = 0;
1602

1603
	obj = i915_gem_object_lookup(file, args->handle);
1604 1605
	if (!obj)
		return -ENOENT;
1606 1607

	/* Pinned buffers may be scanout, so flush the cache */
	if (READ_ONCE(obj->pin_display)) {
		err = i915_mutex_lock_interruptible(dev);
		if (!err) {
			i915_gem_object_flush_cpu_write_domain(obj);
			mutex_unlock(&dev->struct_mutex);
		}
	}
1615

	i915_gem_object_put(obj);
1617
	return err;
1618 1619 1620
}

/**
1621 1622 1623 1624 1625
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1626 1627 1628
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
1639 1640 1641
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1642
		    struct drm_file *file)
1643 1644
{
	struct drm_i915_gem_mmap *args = data;
1645
	struct drm_i915_gem_object *obj;
1646 1647
	unsigned long addr;

1648 1649 1650
	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

1651
	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1652 1653
		return -ENODEV;

1654 1655
	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
1656
		return -ENOENT;
1657

1658 1659 1660
	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
1661
	if (!obj->base.filp) {
		i915_gem_object_put(obj);
1663 1664 1665
		return -EINVAL;
	}

1666
	addr = vm_mmap(obj->base.filp, 0, args->size,
1667 1668
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
1669 1670 1671 1672
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

1673
		if (down_write_killable(&mm->mmap_sem)) {
			i915_gem_object_put(obj);
1675 1676
			return -EINTR;
		}
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);
1684 1685

		/* This may race, but that's ok, it only gets set */
1686
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1687
	}
	i915_gem_object_put(obj);
1689 1690 1691 1692 1693 1694 1695 1696
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

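/* Number of pages spanned by one tile row of this object's tiling mode. */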
static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
1699
	return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
1700 1701
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 1;
}

1752 1753
/**
 * i915_gem_fault - fault a page into the GTT
 * @area: CPU VMA in question
1755
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
1767 1768 1769
 *
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1770
 */
int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
1772
{
1773
#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1775
	struct drm_device *dev = obj->base.dev;
1776 1777
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
1778
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
	struct i915_vma *vma;
1780
	pgoff_t page_offset;
1781
	unsigned int flags;
1782
	int ret;
1783

1784
	/* We don't use vmf->pgoff since that has the fake offset */
1785
	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
1786

	trace_i915_gem_object_fault(obj, page_offset, true, write);

1789
	/* Try to flush the object off the GPU first without holding the lock.
1790
	 * Upon acquiring the lock, we will perform our sanity checks and then
1791 1792 1793
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
1794 1795 1796 1797
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
1798
	if (ret)
1799 1800
		goto err;

1801 1802 1803 1804
	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;
1810

1811
	/* Access to snoopable pages through the GTT is incoherent. */
1812
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
1813
		ret = -EFAULT;
1814
		goto err_unlock;
1815 1816
	}

	/* If the object is smaller than a couple of partial vma, it is
	 * not worth only creating a single partial vma - we may as well
	 * clear enough space for the full object.
	 */
	flags = PIN_MAPPABLE;
	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
		flags |= PIN_NONBLOCK | PIN_NONFAULT;

1825
	/* Now pin it into the GTT as needed */
1826
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
1827 1828
	if (IS_ERR(vma)) {
		struct i915_ggtt_view view;
1829 1830
		unsigned int chunk_size;

1831
		/* Use a partial view if it is bigger than available space */
1832 1833
		chunk_size = MIN_CHUNK_PAGES;
		if (i915_gem_object_is_tiled(obj))
1834
			chunk_size = roundup(chunk_size, tile_row_pages(obj));
1835

1836 1837 1838 1839
		memset(&view, 0, sizeof(view));
		view.type = I915_GGTT_VIEW_PARTIAL;
		view.params.partial.offset = rounddown(page_offset, chunk_size);
		view.params.partial.size =
1840
			min_t(unsigned int, chunk_size,
1841
			      vma_pages(area) - view.params.partial.offset);
1842

1843 1844 1845 1846 1847 1848
		/* If the partial covers the entire object, just create a
		 * normal VMA.
		 */
		if (chunk_size >= obj->base.size >> PAGE_SHIFT)
			view.type = I915_GGTT_VIEW_NORMAL;

1849 1850 1851 1852 1853
		/* Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */
		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;

1854 1855
		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	}
C
Chris Wilson 已提交
1856 1857
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
1858
		goto err_unlock;
C
Chris Wilson 已提交
1859
	}
1860

1861 1862
	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
1863
		goto err_unpin;
1864

1865
	ret = i915_vma_get_fence(vma);
1866
	if (ret)
1867
		goto err_unpin;
1868

1869
	/* Mark as being mmapped into userspace for later revocation */
1870
	assert_rpm_wakelock_held(dev_priv);
1871 1872 1873
	if (list_empty(&obj->userfault_link))
		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);

1874
	/* Finally, remap it using the new GTT offset */
1875 1876 1877 1878 1879
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
			       (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->mappable);
1880

1881
err_unpin:
C
Chris Wilson 已提交
1882
	__i915_vma_unpin(vma);
1883
err_unlock:
1884
	mutex_unlock(&dev->struct_mutex);
1885 1886
err_rpm:
	intel_runtime_pm_put(dev_priv);
1887
	i915_gem_object_unpin_pages(obj);
1888
err:
1889
	switch (ret) {
1890
	case -EIO:
1891 1892 1893 1894 1895 1896 1897
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1898 1899 1900
			ret = VM_FAULT_SIGBUS;
			break;
		}
1901
	case -EAGAIN:
D
Daniel Vetter 已提交
1902 1903 1904 1905
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
1906
		 */
1907 1908
	case 0:
	case -ERESTARTSYS:
1909
	case -EINTR:
1910 1911 1912 1913 1914
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
1915 1916
		ret = VM_FAULT_NOPAGE;
		break;
1917
	case -ENOMEM:
1918 1919
		ret = VM_FAULT_OOM;
		break;
1920
	case -ENOSPC:
1921
	case -EFAULT:
1922 1923
		ret = VM_FAULT_SIGBUS;
		break;
1924
	default:
1925
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1926 1927
		ret = VM_FAULT_SIGBUS;
		break;
1928
	}
1929
	return ret;
1930 1931
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	lockdep_assert_held(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (list_empty(&obj->userfault_link))
		goto out;

	list_del_init(&obj->userfault_link);
	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	/* Ensure that the CPU's PTE are revoked and there are no outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

out:
	intel_runtime_pm_put(i915);
}

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.userfault_list, userfault_link) {
		list_del_init(&obj->userfault_link);
		drm_vma_node_unmap(&obj->base.vma_node,
				   obj->base.dev->anon_inode->i_mapping);
	}

	/* The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		if (WARN_ON(reg->pin_count))
			continue;

		if (!reg->vma)
			continue;

		GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
		reg->dirty = true;
	}
}

/**
 * i915_gem_get_ggtt_size - return required global GTT size for an object
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 * @stride: tiling stride
 *
 * Return the required global GTT size for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
			   u64 size, int tiling_mode, unsigned int stride)
{
	u64 ggtt_size;

	GEM_BUG_ON(!size);

	if (tiling_mode == I915_TILING_NONE)
		return size;

	GEM_BUG_ON(!stride);

	if (INTEL_GEN(dev_priv) >= 4) {
		stride *= i915_gem_tile_height(tiling_mode);
		GEM_BUG_ON(stride & 4095);
		return roundup(size, stride);
	}

	/* Previous chips need a power-of-two fence region when tiling */
	if (IS_GEN3(dev_priv))
		ggtt_size = 1024*1024;
	else
		ggtt_size = 512*1024;

	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}
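
/*
 * Worked example (illustrative, not taken from the code above): on gen3 a
 * tiled object of 700KiB starts at the 1MiB minimum fence region and stays
 * there, whereas a tiled 1.5MiB object is rounded up to the next power of
 * two, i.e. a 2MiB fence region. On the older non-gen3 parts the doubling
 * walk starts from 512KiB instead.
 */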

/**
 * i915_gem_get_ggtt_alignment - return required global GTT alignment
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 * @stride: tiling stride
 * @fenced: is fenced alignment required or not
 *
 * Return the required global GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
				int tiling_mode, unsigned int stride,
				bool fenced)
{
	GEM_BUG_ON(!size);

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_GEN(dev_priv) >= 4 ||
	    (!fenced && (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode, stride);
}

static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int err;

	err = drm_gem_create_mmap_offset(&obj->base);
	if (likely(!err))
		return 0;

	/* Attempt to reap some mmap space from dead objects */
	do {
		err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
		if (err)
			break;

		i915_gem_drain_freed_objects(dev_priv);
		err = drm_gem_create_mmap_offset(&obj->base);
		if (!err)
			break;

	} while (flush_delayed_work(&dev_priv->gt.retire_work));

	return err;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret == 0)
		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

	i915_gem_object_put(obj);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
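
/*
 * Illustrative sketch only (not part of the driver): the expected userspace
 * flow is to request the fake offset with this ioctl and then pass it to
 * mmap() on the same DRM file descriptor, at which point i915_gem_fault()
 * services the resulting page faults. Assumes a libdrm-style drmIoctl()
 * wrapper and a valid GEM handle.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	void *ptr = MAP_FAILED;
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg) == 0)
 *		ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, fd, arg.offset);
 */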

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
}

/* Try to discard unwanted pages */
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(obj->mm.pages);

	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = obj->base.filp->f_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}

2207
static void
2208 2209
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
			      struct sg_table *pages)
2210
{
2211 2212
	struct sgt_iter sgt_iter;
	struct page *page;
2213

2214
	__i915_gem_object_release_shmem(obj, pages, true);
2215

2216
	i915_gem_gtt_finish_pages(obj, pages);
I
Imre Deak 已提交
2217

2218
	if (i915_gem_object_needs_bit17_swizzle(obj))
2219
		i915_gem_object_save_bit_17_swizzle(obj, pages);
2220

2221
	for_each_sgt_page(page, sgt_iter, pages) {
C
Chris Wilson 已提交
2222
		if (obj->mm.dirty)
2223
			set_page_dirty(page);
2224

C
Chris Wilson 已提交
2225
		if (obj->mm.madv == I915_MADV_WILLNEED)
2226
			mark_page_accessed(page);
2227

2228
		put_page(page);
2229
	}
C
Chris Wilson 已提交
2230
	obj->mm.dirty = false;
2231

2232 2233
	sg_free_table(pages);
	kfree(pages);
2234
}
C
Chris Wilson 已提交
2235

2236 2237 2238 2239 2240
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void **slot;

C
Chris Wilson 已提交
2241 2242
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2243 2244
}

2245 2246
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				 enum i915_mm_subclass subclass)
2247
{
2248
	struct sg_table *pages;
2249

C
Chris Wilson 已提交
2250
	if (i915_gem_object_has_pinned_pages(obj))
2251
		return;
2252

2253
	GEM_BUG_ON(obj->bind_count);
2254 2255 2256 2257
	if (!READ_ONCE(obj->mm.pages))
		return;

	/* May be called by shrinker from within get_pages() (on another bo) */
2258
	mutex_lock_nested(&obj->mm.lock, subclass);
2259 2260
	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
		goto unlock;
B
Ben Widawsky 已提交
2261

2262 2263 2264
	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
2265 2266
	pages = fetch_and_zero(&obj->mm.pages);
	GEM_BUG_ON(!pages);
2267

C
Chris Wilson 已提交
2268
	if (obj->mm.mapping) {
2269 2270
		void *ptr;

C
Chris Wilson 已提交
2271
		ptr = ptr_mask_bits(obj->mm.mapping);
2272 2273
		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
2274
		else
2275 2276
			kunmap(kmap_to_page(ptr));

C
Chris Wilson 已提交
2277
		obj->mm.mapping = NULL;
2278 2279
	}

2280 2281
	__i915_gem_object_reset_page_iter(obj);

2282
	obj->ops->put_pages(obj, pages);
2283 2284
unlock:
	mutex_unlock(&obj->mm.lock);
C
Chris Wilson 已提交
2285 2286
}

2287
static unsigned int swiotlb_max_size(void)
2288 2289 2290 2291 2292 2293 2294 2295
{
#if IS_ENABLED(CONFIG_SWIOTLB)
	return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
#else
	return 0;
#endif
}

2296 2297 2298 2299 2300 2301 2302 2303 2304
static void i915_sg_trim(struct sg_table *orig_st)
{
	struct sg_table new_st;
	struct scatterlist *sg, *new_sg;
	unsigned int i;

	if (orig_st->nents == orig_st->orig_nents)
		return;

2305
	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
2306 2307 2308 2309 2310 2311 2312 2313
		return;

	new_sg = new_st.sgl;
	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
		/* called before being DMA mapped, no need to copy sg->dma_* */
		new_sg = sg_next(new_sg);
	}
2314
	GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
2315 2316 2317 2318 2319 2320

	sg_free_table(orig_st);

	*orig_st = new_st;
}

2321
static struct sg_table *
C
Chris Wilson 已提交
2322
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2323
{
2324
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2325 2326
	const unsigned long page_count = obj->base.size / PAGE_SIZE;
	unsigned long i;
2327
	struct address_space *mapping;
2328 2329
	struct sg_table *st;
	struct scatterlist *sg;
2330
	struct sgt_iter sgt_iter;
2331
	struct page *page;
2332
	unsigned long last_pfn = 0;	/* suppress gcc warning */
2333
	unsigned int max_segment;
I
Imre Deak 已提交
2334
	int ret;
C
Chris Wilson 已提交
2335
	gfp_t gfp;
2336

C
Chris Wilson 已提交
2337 2338 2339 2340
	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
2341 2342
	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
C
Chris Wilson 已提交
2343

2344 2345
	max_segment = swiotlb_max_size();
	if (!max_segment)
2346
		max_segment = rounddown(UINT_MAX, PAGE_SIZE);
2347

2348 2349
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
2350
		return ERR_PTR(-ENOMEM);
2351

2352
rebuild_st:
2353 2354
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
2355
		return ERR_PTR(-ENOMEM);
2356
	}
2357

2358 2359 2360 2361 2362
	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
2363
	mapping = obj->base.filp->f_mapping;
2364
	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2365
	gfp |= __GFP_NORETRY | __GFP_NOWARN;
2366 2367 2368
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
C
Chris Wilson 已提交
2369 2370
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
2371 2372 2373 2374 2375
			i915_gem_shrink(dev_priv,
					page_count,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
C
Chris Wilson 已提交
2376 2377 2378 2379 2380 2381 2382
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
2383
			page = shmem_read_mapping_page(mapping, i);
I
Imre Deak 已提交
2384 2385
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
2386
				goto err_sg;
I
Imre Deak 已提交
2387
			}
C
Chris Wilson 已提交
2388
		}
2389 2390 2391
		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
2392 2393 2394 2395 2396 2397 2398 2399
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);
2400 2401 2402

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2403
	}
2404
	if (sg) /* loop terminated early; short sg table */
2405
		sg_mark_end(sg);
2406

2407 2408 2409
	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

2410
	ret = i915_gem_gtt_prepare_pages(obj, st);
2411 2412 2413 2414 2415 2416 2417 2418 2419 2420 2421 2422 2423 2424 2425 2426 2427 2428 2429
	if (ret) {
		/* DMA remapping failed? One possible cause is that
		 * it could not reserve enough large entries, asking
		 * for PAGE_SIZE chunks instead may be helpful.
		 */
		if (max_segment > PAGE_SIZE) {
			for_each_sgt_page(page, sgt_iter, st)
				put_page(page);
			sg_free_table(st);

			max_segment = PAGE_SIZE;
			goto rebuild_st;
		} else {
			dev_warn(&dev_priv->drm.pdev->dev,
				 "Failed to DMA remap %lu pages\n",
				 page_count);
			goto err_pages;
		}
	}
I
Imre Deak 已提交
2430

2431
	if (i915_gem_object_needs_bit17_swizzle(obj))
2432
		i915_gem_object_do_bit_17_swizzle(obj, st);
2433

2434
	return st;
2435

2436
err_sg:
2437
	sg_mark_end(sg);
2438
err_pages:
2439 2440
	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);
2441 2442
	sg_free_table(st);
	kfree(st);
2443 2444 2445 2446 2447 2448 2449 2450 2451

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
I
Imre Deak 已提交
2452 2453 2454
	if (ret == -ENOSPC)
		ret = -ENOMEM;

2455 2456 2457 2458 2459 2460
	return ERR_PTR(ret);
}

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
2461
	lockdep_assert_held(&obj->mm.lock);
2462 2463 2464 2465 2466

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;
2467 2468 2469 2470 2471 2472 2473

	if (i915_gem_object_is_tiled(obj) &&
	    to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}
2474 2475 2476 2477 2478 2479
}

static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

2480 2481
	GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

2482 2483 2484 2485 2486 2487 2488 2489 2490 2491 2492
	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	pages = obj->ops->get_pages(obj);
	if (unlikely(IS_ERR(pages)))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages);
	return 0;
2493 2494
}

2495
/* Ensure that the associated pages are gathered from the backing storage
2496
 * and pinned into our object. i915_gem_object_pin_pages() may be called
2497
 * multiple times before they are released by a single call to
2498
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
2499 2500 2501
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
C
Chris Wilson 已提交
2502
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2503
{
2504
	int err;
2505

2506 2507 2508
	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return err;
2509

2510 2511 2512 2513
	if (unlikely(!obj->mm.pages)) {
		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;
2514

2515 2516 2517
		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);
2518

2519 2520
unlock:
	mutex_unlock(&obj->mm.lock);
2521
	return err;
2522 2523
}

2524
/* The 'mapping' part of i915_gem_object_pin_map() below */
2525 2526
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
2527 2528
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
C
Chris Wilson 已提交
2529
	struct sg_table *sgt = obj->mm.pages;
2530 2531
	struct sgt_iter sgt_iter;
	struct page *page;
2532 2533
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
2534
	unsigned long i = 0;
2535
	pgprot_t pgprot;
2536 2537 2538
	void *addr;

	/* A single page can always be kmapped */
2539
	if (n_pages == 1 && type == I915_MAP_WB)
2540 2541
		return kmap(sg_page(sgt->sgl));

2542 2543 2544 2545 2546 2547
	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
		if (!pages)
			return NULL;
	}
2548

2549 2550
	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;
2551 2552 2553 2554

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

2555 2556 2557 2558 2559 2560 2561 2562 2563
	switch (type) {
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);
2564

2565 2566
	if (pages != stack_pages)
		drm_free_large(pages);
2567 2568 2569 2570 2571

	return addr;
}

/* get, pin, and map the pages of the object into kernel space */
2572 2573
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
2574
{
2575 2576 2577
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
2578 2579
	int ret;

2580
	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
2581

2582
	ret = mutex_lock_interruptible(&obj->mm.lock);
2583 2584 2585
	if (ret)
		return ERR_PTR(ret);

2586 2587
	pinned = true;
	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2588 2589 2590 2591
		if (unlikely(!obj->mm.pages)) {
			ret = ____i915_gem_object_get_pages(obj);
			if (ret)
				goto err_unlock;
2592

2593 2594 2595
			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
2596 2597 2598
		pinned = false;
	}
	GEM_BUG_ON(!obj->mm.pages);
2599

C
Chris Wilson 已提交
2600
	ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
2601 2602 2603
	if (ptr && has_type != type) {
		if (pinned) {
			ret = -EBUSY;
2604
			goto err_unpin;
2605
		}
2606 2607 2608 2609 2610 2611

		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

C
Chris Wilson 已提交
2612
		ptr = obj->mm.mapping = NULL;
2613 2614
	}

2615 2616 2617 2618
	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			ret = -ENOMEM;
2619
			goto err_unpin;
2620 2621
		}

C
Chris Wilson 已提交
2622
		obj->mm.mapping = ptr_pack_bits(ptr, type);
2623 2624
	}

2625 2626
out_unlock:
	mutex_unlock(&obj->mm.lock);
2627 2628
	return ptr;

2629 2630 2631 2632 2633
err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(ret);
	goto out_unlock;
2634 2635
}
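
/*
 * Illustrative sketch only (not part of the driver): a typical in-kernel
 * user pairs pin_map with unpin_map around CPU access, for example when
 * filling a small batch buffer. Error handling is elided; I915_MAP_WB
 * assumes the object is CPU-cache coherent for the access being made.
 *
 *	u32 *cs = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (!IS_ERR(cs)) {
 *		cs[0] = MI_BATCH_BUFFER_END;
 *		i915_gem_object_unpin_map(obj);
 *	}
 */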

2636
static bool ban_context(const struct i915_gem_context *ctx)
2637
{
2638 2639
	return (i915_gem_context_is_bannable(ctx) &&
		ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD);
2640 2641
}

2642
static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
2643
{
2644
	ctx->guilty_count++;
2645 2646 2647
	ctx->ban_score += CONTEXT_SCORE_GUILTY;
	if (ban_context(ctx))
		i915_gem_context_set_banned(ctx);
2648 2649

	DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
2650
			 ctx->name, ctx->ban_score,
2651
			 yesno(i915_gem_context_is_banned(ctx)));
2652

2653
	if (!i915_gem_context_is_banned(ctx) || IS_ERR_OR_NULL(ctx->file_priv))
2654 2655
		return;

2656 2657 2658
	ctx->file_priv->context_bans++;
	DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
			 ctx->name, ctx->file_priv->context_bans);
2659 2660 2661 2662
}

static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
{
2663
	ctx->active_count++;
2664 2665
}

2666
struct drm_i915_gem_request *
2667
i915_gem_find_active_request(struct intel_engine_cs *engine)
2668
{
2669 2670
	struct drm_i915_gem_request *request;

2671 2672 2673 2674 2675 2676 2677 2678
	/* We are called by the error capture and reset at a random
	 * point in time. In particular, note that neither is crucially
	 * ordered with an interrupt. After a hang, the GPU is dead and we
	 * assume that no more writes can happen (we waited long enough for
	 * all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 */
2679
	list_for_each_entry(request, &engine->timeline->requests, link) {
C
Chris Wilson 已提交
2680
		if (__i915_gem_request_completed(request))
2681
			continue;
2682

2683
		return request;
2684
	}
2685 2686 2687 2688

	return NULL;
}

2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705
static void reset_request(struct drm_i915_gem_request *request)
{
	void *vaddr = request->ring->vaddr;
	u32 head;

	/* As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = request->head;
	if (request->postfix < head) {
		memset(vaddr + head, 0, request->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, request->postfix - head);
}

2706 2707 2708 2709 2710
void i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
{
	i915_gem_revoke_fences(dev_priv);
}

2711
static void i915_gem_reset_engine(struct intel_engine_cs *engine)
2712 2713
{
	struct drm_i915_gem_request *request;
2714
	struct i915_gem_context *hung_ctx;
C
Chris Wilson 已提交
2715
	struct intel_timeline *timeline;
2716
	unsigned long flags;
2717 2718
	bool ring_hung;

2719 2720 2721
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);

2722
	request = i915_gem_find_active_request(engine);
2723
	if (!request)
2724 2725
		return;

2726 2727
	hung_ctx = request->ctx;

2728 2729 2730 2731 2732
	ring_hung = engine->hangcheck.stalled;
	if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
		DRM_DEBUG_DRIVER("%s pardoned, was guilty? %s\n",
				 engine->name,
				 yesno(ring_hung));
2733
		ring_hung = false;
2734
	}
2735

2736
	if (ring_hung)
2737
		i915_gem_context_mark_guilty(hung_ctx);
2738
	else
2739
		i915_gem_context_mark_innocent(hung_ctx);
2740

2741 2742 2743 2744
	if (!ring_hung)
		return;

	DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
2745
			 engine->name, request->global_seqno);
2746 2747 2748 2749

	/* Setup the CS to resume from the breadcrumb of the hung request */
	engine->reset_hw(engine, request);

2750 2751 2752 2753
	/* If this context is now banned, skip all of its pending requests. */
	if (!i915_gem_context_is_banned(hung_ctx))
		return;

2754 2755 2756 2757 2758 2759 2760 2761
	/* Users of the default context do not rely on logical state
	 * preserved between batches. They have to emit full state on
	 * every batch and so it is safe to execute queued requests following
	 * the hang.
	 *
	 * Other contexts preserve state, now corrupt. We want to skip all
	 * queued requests that reference the corrupt context.
	 */
2762
	if (i915_gem_context_is_default(hung_ctx))
2763 2764
		return;

2765
	timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
2766 2767 2768 2769

	spin_lock_irqsave(&engine->timeline->lock, flags);
	spin_lock(&timeline->lock);

2770
	list_for_each_entry_continue(request, &engine->timeline->requests, link)
2771
		if (request->ctx == hung_ctx)
2772
			reset_request(request);
C
Chris Wilson 已提交
2773 2774 2775

	list_for_each_entry(request, &timeline->requests, link)
		reset_request(request);
2776 2777 2778

	spin_unlock(&timeline->lock);
	spin_unlock_irqrestore(&engine->timeline->lock, flags);
2779
}
2780

2781
void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
2782
{
2783
	struct intel_engine_cs *engine;
2784
	enum intel_engine_id id;
2785

2786 2787
	lockdep_assert_held(&dev_priv->drm.struct_mutex);

2788 2789
	i915_gem_retire_requests(dev_priv);

2790
	for_each_engine(engine, dev_priv, id)
2791 2792
		i915_gem_reset_engine(engine);

2793
	i915_gem_restore_fences(dev_priv);
2794 2795 2796 2797 2798 2799 2800

	if (dev_priv->gt.awake) {
		intel_sanitize_gt_powersave(dev_priv);
		intel_enable_gt_powersave(dev_priv);
		if (INTEL_GEN(dev_priv) >= 6)
			gen6_rps_busy(dev_priv);
	}
2801 2802 2803 2804
}

static void nop_submit_request(struct drm_i915_gem_request *request)
{
2805 2806
	i915_gem_request_submit(request);
	intel_engine_init_global_seqno(request->engine, request->global_seqno);
2807 2808 2809 2810
}

static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
{
2811 2812 2813 2814 2815 2816
	/* We need to be sure that no thread is running the old callback as
	 * we install the nop handler (otherwise we would submit a request
	 * to hardware that will never complete). In order to prevent this
	 * race, we wait until the machine is idle before making the swap
	 * (using stop_machine()).
	 */
2817
	engine->submit_request = nop_submit_request;
2818

2819 2820 2821 2822
	/* Mark all pending requests as complete so that any concurrent
	 * (lockless) lookup doesn't try and wait upon the request as we
	 * reset it.
	 */
2823
	intel_engine_init_global_seqno(engine,
2824
				       intel_engine_last_submit(engine));
2825

2826 2827 2828 2829 2830 2831
	/*
	 * Clear the execlists queue up before freeing the requests, as those
	 * are the ones that keep the context and ringbuffer backing objects
	 * pinned in place.
	 */

2832
	if (i915.enable_execlists) {
2833 2834 2835 2836
		unsigned long flags;

		spin_lock_irqsave(&engine->timeline->lock, flags);

2837 2838 2839
		i915_gem_request_put(engine->execlist_port[0].request);
		i915_gem_request_put(engine->execlist_port[1].request);
		memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
2840 2841
		engine->execlist_queue = RB_ROOT;
		engine->execlist_first = NULL;
2842 2843

		spin_unlock_irqrestore(&engine->timeline->lock, flags);
2844
	}
2845 2846
}

2847
static int __i915_gem_set_wedged_BKL(void *data)
2848
{
2849
	struct drm_i915_private *i915 = data;
2850
	struct intel_engine_cs *engine;
2851
	enum intel_engine_id id;
2852

2853 2854 2855 2856 2857 2858 2859 2860
	for_each_engine(engine, i915, id)
		i915_gem_cleanup_engine(engine);

	return 0;
}

void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
{
2861 2862
	lockdep_assert_held(&dev_priv->drm.struct_mutex);
	set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
2863

2864
	stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
2865

2866
	i915_gem_context_lost(dev_priv);
2867
	i915_gem_retire_requests(dev_priv);
2868 2869

	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2870 2871
}

2872
static void
2873 2874
i915_gem_retire_work_handler(struct work_struct *work)
{
2875
	struct drm_i915_private *dev_priv =
2876
		container_of(work, typeof(*dev_priv), gt.retire_work.work);
2877
	struct drm_device *dev = &dev_priv->drm;
2878

2879
	/* Come back later if the device is busy... */
2880
	if (mutex_trylock(&dev->struct_mutex)) {
2881
		i915_gem_retire_requests(dev_priv);
2882
		mutex_unlock(&dev->struct_mutex);
2883
	}
2884 2885 2886 2887 2888

	/* Keep the retire handler running until we are finally idle.
	 * We do not need to do this test under locking as in the worst-case
	 * we queue the retire worker once too often.
	 */
2889 2890
	if (READ_ONCE(dev_priv->gt.awake)) {
		i915_queue_hangcheck(dev_priv);
2891 2892
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.retire_work,
2893
				   round_jiffies_up_relative(HZ));
2894
	}
2895
}
2896

2897 2898 2899 2900
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
2901
		container_of(work, typeof(*dev_priv), gt.idle_work.work);
2902
	struct drm_device *dev = &dev_priv->drm;
2903
	struct intel_engine_cs *engine;
2904
	enum intel_engine_id id;
2905 2906 2907 2908 2909
	bool rearm_hangcheck;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

2910 2911 2912 2913 2914 2915 2916
	/*
	 * Wait for last execlists context complete, but bail out in case a
	 * new request is submitted.
	 */
	wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
		 intel_execlists_idle(dev_priv), 10);

2917
	if (READ_ONCE(dev_priv->gt.active_requests))
2918 2919 2920 2921 2922 2923 2924 2925 2926 2927 2928 2929 2930
		return;

	rearm_hangcheck =
		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	if (!mutex_trylock(&dev->struct_mutex)) {
		/* Currently busy, come back later */
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->gt.idle_work,
				 msecs_to_jiffies(50));
		goto out_rearm;
	}

2931 2932 2933 2934 2935 2936 2937
	/*
	 * New request retired after this work handler started, extend active
	 * period until next instance of the work.
	 */
	if (work_pending(work))
		goto out_unlock;

2938
	if (dev_priv->gt.active_requests)
2939
		goto out_unlock;
2940

2941 2942 2943
	if (wait_for(intel_execlists_idle(dev_priv), 10))
		DRM_ERROR("Timeout waiting for engines to idle\n");

2944
	for_each_engine(engine, dev_priv, id)
2945
		i915_gem_batch_pool_fini(&engine->batch_pool);
2946

2947 2948 2949
	GEM_BUG_ON(!dev_priv->gt.awake);
	dev_priv->gt.awake = false;
	rearm_hangcheck = false;
2950

2951 2952 2953 2954 2955
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_idle(dev_priv);
	intel_runtime_pm_put(dev_priv);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
2956

2957 2958 2959 2960
out_rearm:
	if (rearm_hangcheck) {
		GEM_BUG_ON(!dev_priv->gt.awake);
		i915_queue_hangcheck(dev_priv);
2961
	}
2962 2963
}

2964 2965 2966 2967 2968 2969 2970 2971 2972 2973
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_vma *vma, *vn;

	mutex_lock(&obj->base.dev->struct_mutex);
	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
		if (vma->vm->file == fpriv)
			i915_vma_close(vma);
2974 2975 2976 2977 2978 2979

	if (i915_gem_object_is_active(obj) &&
	    !i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_set_active_reference(obj);
		i915_gem_object_get(obj);
	}
2980 2981 2982
	mutex_unlock(&obj->base.dev->struct_mutex);
}

2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993
static unsigned long to_wait_timeout(s64 timeout_ns)
{
	if (timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;

	if (timeout_ns == 0)
		return 0;

	return nsecs_to_jiffies_timeout(timeout_ns);
}

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
	ktime_t start;
	long ret;

	if (args->flags != 0)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->bo_handle);
	if (!obj)
		return -ENOENT;

	start = ktime_get();

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
				   to_wait_timeout(args->timeout_ns),
				   to_rps_client(file));

	if (args->timeout_ns > 0) {
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;
	}

	i915_gem_object_put(obj);
	return ret;
}
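
/*
 * Illustrative sketch only (not part of the driver): from userspace the wait
 * ioctl is typically used either as a poll (timeout_ns == 0) or as a bounded
 * wait; on return, timeout_ns holds the time remaining. Assumes a
 * libdrm-style drmIoctl() wrapper and a valid GEM handle; a -1 return with
 * errno set to ETIME means the object was still busy when the budget ran out.
 *
 *	struct drm_i915_gem_wait wait = {
 *		.bo_handle = handle,
 *		.timeout_ns = 20 * 1000 * 1000,
 *	};
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
 */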

3050
static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
3051
{
3052
	int ret, i;
3053

3054 3055 3056 3057 3058
	for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
		ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
		if (ret)
			return ret;
	}
3059

3060 3061 3062 3063 3064 3065 3066
	return 0;
}

int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
	int ret;

3067 3068 3069 3070 3071 3072 3073 3074 3075 3076 3077 3078
	if (flags & I915_WAIT_LOCKED) {
		struct i915_gem_timeline *tl;

		lockdep_assert_held(&i915->drm.struct_mutex);

		list_for_each_entry(tl, &i915->gt.timelines, link) {
			ret = wait_for_timeline(tl, flags);
			if (ret)
				return ret;
		}
	} else {
		ret = wait_for_timeline(&i915->gt.global_timeline, flags);
3079 3080 3081
		if (ret)
			return ret;
	}
3082

3083
	return 0;
3084 3085
}

3086 3087
void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     bool force)
3088 3089 3090 3091 3092
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
C
Chris Wilson 已提交
3093
	if (!obj->mm.pages)
3094
		return;
3095

3096 3097 3098 3099
	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
3100
	if (obj->stolen || obj->phys_handle)
3101
		return;
3102

3103 3104 3105 3106 3107 3108 3109 3110
	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
3111 3112
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
		obj->cache_dirty = true;
3113
		return;
3114
	}
3115

C
Chris Wilson 已提交
3116
	trace_i915_gem_object_clflush(obj);
C
Chris Wilson 已提交
3117
	drm_clflush_sg(obj->mm.pages);
3118
	obj->cache_dirty = false;
3119 3120 3121 3122
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
3123
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3124
{
3125
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
C
Chris Wilson 已提交
3126

3127
	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3128 3129
		return;

3130
	/* No actual flushing is required for the GTT write domain.  Writes
3131
	 * to it "immediately" go to main memory as far as we know, so there's
3132
	 * no chipset flush.  It also doesn't land in render cache.
3133 3134 3135 3136
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
3137 3138 3139 3140 3141 3142 3143
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour).
3144
	 */
3145
	wmb();
3146
	if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
3147
		POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
3148

3149
	intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
3150

3151
	obj->base.write_domain = 0;
C
Chris Wilson 已提交
3152
	trace_i915_gem_object_change_domain(obj,
3153
					    obj->base.read_domains,
3154
					    I915_GEM_DOMAIN_GTT);
3155 3156 3157 3158
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
3159
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3160
{
3161
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3162 3163
		return;

3164
	i915_gem_clflush_object(obj, obj->pin_display);
3165
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3166

3167
	obj->base.write_domain = 0;
C
Chris Wilson 已提交
3168
	trace_i915_gem_object_change_domain(obj,
3169
					    obj->base.read_domains,
3170
					    I915_GEM_DOMAIN_CPU);
3171 3172
}

3173 3174
/**
 * Moves a single object to the GTT read, and possibly write domain.
3175 3176
 * @obj: object to act on
 * @write: ask for write access or read only
3177 3178 3179 3180
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
J
Jesse Barnes 已提交
3181
int
3182
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3183
{
C
Chris Wilson 已提交
3184
	uint32_t old_write_domain, old_read_domains;
3185
	int ret;
3186

3187
	lockdep_assert_held(&obj->base.dev->struct_mutex);
3188

3189 3190 3191 3192 3193 3194
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
3195 3196 3197
	if (ret)
		return ret;

3198 3199 3200
	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

3201 3202 3203 3204 3205 3206 3207 3208
	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
C
Chris Wilson 已提交
3209
	ret = i915_gem_object_pin_pages(obj);
3210 3211 3212
	if (ret)
		return ret;

3213
	i915_gem_object_flush_cpu_write_domain(obj);
C
Chris Wilson 已提交
3214

3215 3216 3217 3218 3219 3220 3221
	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

3222 3223
	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;
C
Chris Wilson 已提交
3224

3225 3226 3227
	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
3228
	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3229
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3230
	if (write) {
3231 3232
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
C
Chris Wilson 已提交
3233
		obj->mm.dirty = true;
3234 3235
	}

C
Chris Wilson 已提交
3236 3237 3238 3239
	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

C
Chris Wilson 已提交
3240
	i915_gem_object_unpin_pages(obj);
3241 3242 3243
	return 0;
}

3244 3245
/**
 * Changes the cache-level of an object across all VMA.
3246 3247
 * @obj: object to act on
 * @cache_level: new cache level to set for the object
3248 3249 3250 3251 3252 3253 3254 3255 3256 3257 3258
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
3259 3260 3261
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
3262
	struct i915_vma *vma;
3263
	int ret;
3264

3265 3266
	lockdep_assert_held(&obj->base.dev->struct_mutex);

3267
	if (obj->cache_level == cache_level)
3268
		return 0;
3269

3270 3271 3272 3273 3274
	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
3275 3276
restart:
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
3277 3278 3279
		if (!drm_mm_node_allocated(&vma->node))
			continue;

3280
		if (i915_vma_is_pinned(vma)) {
3281 3282 3283 3284
			DRM_DEBUG("can not change the cache level of pinned objects\n");
			return -EBUSY;
		}

3285 3286 3287 3288 3289 3290 3291 3292 3293 3294 3295 3296
		if (i915_gem_valid_gtt_space(vma, cache_level))
			continue;

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;

		/* As unbinding may affect other elements in the
		 * obj->vma_list (due to side-effects from retiring
		 * an active vma), play safe and restart the iterator.
		 */
		goto restart;
3297 3298
	}

3299 3300 3301 3302 3303 3304 3305
	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However since
	 * we already have a valid slot, dma mapping, pages etc, we may as well
	 * rewrite the PTE in the belief that doing so tramples upon less
	 * state and so involves less work.
	 */
3306
	if (obj->bind_count) {
3307 3308 3309 3310
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
		 */
3311 3312 3313 3314 3315 3316
		ret = i915_gem_object_wait(obj,
					   I915_WAIT_INTERRUPTIBLE |
					   I915_WAIT_LOCKED |
					   I915_WAIT_ALL,
					   MAX_SCHEDULE_TIMEOUT,
					   NULL);
3317 3318 3319
		if (ret)
			return ret;

3320 3321
		if (!HAS_LLC(to_i915(obj->base.dev)) &&
		    cache_level != I915_CACHE_NONE) {
3322 3323 3324 3325 3326 3327 3328 3329 3330 3331 3332 3333 3334 3335 3336 3337
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmapping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
			i915_gem_release_mmap(obj);

			/* As we no longer need a fence for GTT access,
			 * we can relinquish it now (and so prevent having
			 * to steal a fence from someone else on the next
			 * fence request). Note GPU activity would have
			 * dropped the fence as all snoopable access is
			 * supposed to be linear.
			 */
3338 3339 3340 3341 3342
			list_for_each_entry(vma, &obj->vma_list, obj_link) {
				ret = i915_vma_put_fence(vma);
				if (ret)
					return ret;
			}
3343 3344 3345 3346 3347 3348 3349 3350
		} else {
			/* We either have incoherent backing store and
			 * so no GTT access or the architecture is fully
			 * coherent. In such cases, existing GTT mmaps
			 * ignore the cache bit in the PTE and we can
			 * rewrite it without confusing the GPU or having
			 * to force userspace to fault back in its mmaps.
			 */
3351 3352
		}

3353
		list_for_each_entry(vma, &obj->vma_list, obj_link) {
3354 3355 3356 3357 3358 3359 3360
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
			if (ret)
				return ret;
		}
3361 3362
	}

3363 3364 3365 3366
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
	    cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		obj->cache_dirty = true;

3367
	list_for_each_entry(vma, &obj->vma_list, obj_link)
3368 3369 3370
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

3371 3372 3373
	return 0;
}

B
Ben Widawsky 已提交
3374 3375
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
3376
{
B
Ben Widawsky 已提交
3377
	struct drm_i915_gem_caching *args = data;
3378
	struct drm_i915_gem_object *obj;
3379
	int err = 0;
3380

3381 3382 3383 3384 3385 3386
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj) {
		err = -ENOENT;
		goto out;
	}
3387

3388 3389 3390 3391 3392 3393
	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

3394 3395 3396 3397
	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

3398 3399 3400 3401
	default:
		args->caching = I915_CACHING_NONE;
		break;
	}
3402 3403 3404
out:
	rcu_read_unlock();
	return err;
3405 3406
}

B
Ben Widawsky 已提交
3407 3408
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
3409
{
3410
	struct drm_i915_private *i915 = to_i915(dev);
B
Ben Widawsky 已提交
3411
	struct drm_i915_gem_caching *args = data;
3412 3413 3414 3415
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

B
Ben Widawsky 已提交
3416 3417
	switch (args->caching) {
	case I915_CACHING_NONE:
3418 3419
		level = I915_CACHE_NONE;
		break;
B
Ben Widawsky 已提交
3420
	case I915_CACHING_CACHED:
3421 3422 3423 3424 3425 3426
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
3427
		if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3428 3429
			return -ENODEV;

3430 3431
		level = I915_CACHE_LLC;
		break;
3432
	case I915_CACHING_DISPLAY:
3433
		level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3434
		break;
3435 3436 3437 3438
	default:
		return -EINVAL;
	}

B
Ben Widawsky 已提交
3439 3440
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
3441
		return ret;
B
Ben Widawsky 已提交
3442

3443 3444
	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
3445 3446 3447 3448 3449
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);
3450
	i915_gem_object_put(obj);
3451 3452 3453 3454 3455
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
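
/*
 * Illustrative sketch only (not part of the driver): userspace opts a buffer
 * into LLC caching (or display-friendly write-through) with the SET_CACHING
 * ioctl before mapping it. Assumes a libdrm-style drmIoctl() wrapper and a
 * valid GEM handle; on parts without LLC or snooping the ioctl fails with
 * -ENODEV, as handled above.
 *
 *	struct drm_i915_gem_caching arg = {
 *		.handle = handle,
 *		.caching = I915_CACHING_CACHED,
 *	};
 *	int err = drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_CACHING, &arg);
 */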

3456
/*
3457 3458 3459
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
3460
 */
C
Chris Wilson 已提交
3461
struct i915_vma *
3462 3463
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
3464
				     const struct i915_ggtt_view *view)
3465
{
C
Chris Wilson 已提交
3466
	struct i915_vma *vma;
3467
	u32 old_read_domains, old_write_domain;
3468 3469
	int ret;

3470 3471
	lockdep_assert_held(&obj->base.dev->struct_mutex);

3472 3473 3474
	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
3475
	obj->pin_display++;
3476

3477 3478 3479 3480 3481 3482 3483 3484 3485
	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
3486
	ret = i915_gem_object_set_cache_level(obj,
3487 3488
					      HAS_WT(to_i915(obj->base.dev)) ?
					      I915_CACHE_WT : I915_CACHE_NONE);
C
Chris Wilson 已提交
3489 3490
	if (ret) {
		vma = ERR_PTR(ret);
3491
		goto err_unpin_display;
C
Chris Wilson 已提交
3492
	}
3493

3494 3495
	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
3496 3497 3498 3499
	 * always use map_and_fenceable for all scanout buffers. However,
	 * it may simply be too big to fit into mappable, in which case
	 * put it anyway and hope that userspace can cope (but always first
	 * try to preserve the existing ABI).
3500
	 */
3501 3502 3503 3504
	vma = ERR_PTR(-ENOSPC);
	if (view->type == I915_GGTT_VIEW_NORMAL)
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
					       PIN_MAPPABLE | PIN_NONBLOCK);
3505 3506 3507 3508 3509 3510 3511 3512 3513 3514 3515 3516 3517 3518 3519 3520
	if (IS_ERR(vma)) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		unsigned int flags;

		/* Valleyview is definitely limited to scanning out the first
		 * 512MiB. Lets presume this behaviour was inherited from the
		 * g4x display engine and that all earlier gen are similarly
		 * limited. Testing suggests that it is a little more
		 * complicated than this. For example, Cherryview appears quite
		 * happy to scanout from anywhere within its global aperture.
		 */
		flags = 0;
		if (HAS_GMCH_DISPLAY(i915))
			flags = PIN_MAPPABLE;
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
	}
	if (IS_ERR(vma))
		goto err_unpin_display;

	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

	/* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
	if (obj->cache_dirty) {
		i915_gem_clflush_object(obj, true);
		intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
	}

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	obj->base.write_domain = 0;
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return vma;

err_unpin_display:
	obj->pin_display--;
	return vma;
}

void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (WARN_ON(vma->obj->pin_display == 0))
		return;

	if (--vma->obj->pin_display == 0)
		vma->display_alignment = 0;

	/* Bump the LRU to try and avoid premature eviction whilst flipping  */
	if (!i915_vma_is_active(vma))
		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

	i915_vma_unpin(vma);
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 * @obj: object to act on
 * @write: requesting write or read-only access
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, false);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct drm_i915_gem_request *request, *target = NULL;
	long ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		/*
		 * Note that the request might not have been submitted yet.
		 * In which case emitted_jiffies will be zero.
		 */
		if (!request->emitted_jiffies)
			continue;

		target = request;
	}
	if (target)
		i915_gem_request_get(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = i915_wait_request(target,
				I915_WAIT_INTERRUPTIBLE,
				MAX_SCHEDULE_TIMEOUT);
	i915_gem_request_put(target);

	return ret < 0 ? ret : 0;
}

struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
			return ERR_PTR(-ENOSPC);

		if (flags & PIN_MAPPABLE) {
			u32 fence_size;

			fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
							    i915_gem_object_get_tiling(obj),
							    i915_gem_object_get_stride(obj));
			/* If the required space is larger than the available
			 * aperture, we will not be able to find a slot for the
			 * object and unbinding the object now will be in
			 * vain. Worse, doing so may cause us to ping-pong
			 * the object in and out of the Global GTT and
			 * waste a lot of cycles under the mutex.
			 */
			if (fence_size > dev_priv->ggtt.mappable_end)
				return ERR_PTR(-E2BIG);

			/* If NONBLOCK is set the caller is optimistically
			 * trying to cache the full object within the mappable
			 * aperture, and *must* have a fallback in place for
			 * situations where we cannot bind the object. We
			 * can be a little more lax here and use the fallback
			 * more often to avoid costly migrations of ourselves
			 * and other objects within the aperture.
			 *
			 * Half-the-aperture is used as a simple heuristic.
			 * More interesting would be to do a search for a free
			 * block prior to making the commitment to unbind.
			 * That caters for the self-harm case, and with a
			 * little more heuristics (e.g. NOFAULT, NOEVICT)
			 * we could try to minimise harm to others.
			 */
			if (flags & PIN_NONBLOCK &&
			    fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}

static __always_inline unsigned int __busy_read_flag(unsigned int id)
{
	/* Note that we could alias engines in the execbuf API, but
	 * that would be very unwise as it prevents userspace from
	 * fine control over engine selection. Ahem.
	 *
	 * This should be something like EXEC_MAX_ENGINE instead of
	 * I915_NUM_ENGINES.
	 */
	BUILD_BUG_ON(I915_NUM_ENGINES > 16);
	return 0x10000 << id;
}

static __always_inline unsigned int __busy_write_id(unsigned int id)
{
	/* The uABI guarantees an active writer is also amongst the read
	 * engines. This would be true if we accessed the activity tracking
	 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we can not guarantee that the last_write
	 * being active implies that we have set the same engine flag from
	 * last_read - hence we always set both read and write busy for
	 * last_write.
	 */
	return id | __busy_read_flag(id);
}

static __always_inline unsigned int
__busy_set_if_active(const struct dma_fence *fence,
		     unsigned int (*flag)(unsigned int id))
{
	struct drm_i915_gem_request *rq;

	/* We have to check the current hw status of the fence as the uABI
	 * guarantees forward progress. We could rely on the idle worker
	 * to eventually flush us, but to minimise latency just ask the
	 * hardware.
	 *
	 * Note we only report on the status of native fences.
	 */
	if (!dma_fence_is_i915(fence))
		return 0;

	/* opencode to_request() in order to avoid const warnings */
	rq = container_of(fence, struct drm_i915_gem_request, fence);
	if (i915_gem_request_completed(rq))
		return 0;

	return flag(rq->engine->exec_id);
}

static __always_inline unsigned int
busy_check_reader(const struct dma_fence *fence)
{
	return __busy_set_if_active(fence, __busy_read_flag);
}

static __always_inline unsigned int
busy_check_writer(const struct dma_fence *fence)
{
	if (!fence)
		return 0;

	return __busy_set_if_active(fence, __busy_write_id);
}

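/* Report which engines are still referencing the object: read engines are
 * encoded in the upper 16 bits of args->busy and the sole writer in the
 * lower bits. The snapshot is taken locklessly under RCU, retrying if the
 * reservation seqcount changes underneath us.
 */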
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	struct reservation_object_list *list;
	unsigned int seq;
	int err;

	err = -ENOENT;
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj)
		goto out;

	/* A discrepancy here is that we do not report the status of
	 * non-i915 fences, i.e. even though we may report the object as idle,
	 * a call to set-domain may still stall waiting for foreign rendering.
	 * This also means that wait-ioctl may report an object as busy,
	 * where busy-ioctl considers it idle.
	 *
	 * We trade the ability to warn of foreign fences to report on which
	 * i915 engines are active for the object.
	 *
	 * Alternatively, we can trade that extra information on read/write
	 * activity with
	 *	args->busy =
	 *		!reservation_object_test_signaled_rcu(obj->resv, true);
	 * to report the overall busyness. This is what the wait-ioctl does.
	 *
	 */
retry:
	seq = raw_read_seqcount(&obj->resv->seq);

	/* Translate the exclusive fence to the READ *and* WRITE engine */
	args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));

	/* Translate shared fences to READ set of engines */
	list = rcu_dereference(obj->resv->fence);
	if (list) {
		unsigned int shared_count = list->shared_count, i;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence =
				rcu_dereference(list->shared[i]);

			args->busy |= busy_check_reader(fence);
		}
	}

	if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
		goto retry;

	err = 0;
out:
	rcu_read_unlock();
	return err;
}

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;

	if (obj->mm.pages &&
	    i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	mutex_unlock(&obj->mm.lock);

out:
	i915_gem_object_put(obj);
	return err;
}

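/* Called when the last GPU write to a frontbuffer object retires; flushes the
 * frontbuffer tracking state. Installed via init_request_active() in
 * i915_gem_object_init() below.
 */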
static void
frontbuffer_retire(struct i915_gem_active *active,
		   struct drm_i915_gem_request *request)
{
	struct drm_i915_gem_object *obj =
		container_of(active, typeof(*obj), frontbuffer_write);

	intel_fb_obj_flush(obj, true, ORIGIN_CS);
}

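/* Common setup shared by all GEM object types: lists, locks, the reservation
 * object and the per-object activity/frontbuffer tracking.
 */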
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	mutex_init(&obj->mm.lock);

	INIT_LIST_HEAD(&obj->global_link);
	INIT_LIST_HEAD(&obj->userfault_link);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	obj->ops = ops;

	reservation_object_init(&obj->__builtin_resv);
	obj->resv = &obj->__builtin_resv;

	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
	init_request_active(&obj->frontbuffer_write, frontbuffer_retire);

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);

	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

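/* Allocate a shmemfs-backed GEM object of @size bytes. The returned object
 * starts in the CPU domain, with the cache level chosen from HAS_LLC().
 */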
struct drm_i915_gem_object *
i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
{
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;
	int ret;

	/* There is a prevalence of the assumption that we fit the object's
	 * page count inside a 32bit _signed_ variable. Let's document this and
	 * catch if we ever need to fix it. In the meantime, if you do spot
	 * such a local variable, please consider fixing!
	 */
	if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev_priv)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

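/* Free a batch of objects collected on the deferred free list: close their
 * remaining GGTT VMA under struct_mutex, then release the pages and the
 * remaining bookkeeping outside the lock.
 */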
static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);
	llist_for_each_entry(obj, freed, freed) {
		struct i915_vma *vma, *vn;

		trace_i915_gem_object_destroy(obj);

		GEM_BUG_ON(i915_gem_object_is_active(obj));
		list_for_each_entry_safe(vma, vn,
					 &obj->vma_list, obj_link) {
			GEM_BUG_ON(!i915_vma_is_ggtt(vma));
			GEM_BUG_ON(i915_vma_is_active(vma));
			vma->flags &= ~I915_VMA_PIN_MASK;
			i915_vma_close(vma);
		}
		GEM_BUG_ON(!list_empty(&obj->vma_list));
		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));

		list_del(&obj->global_link);
	}
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	llist_for_each_entry_safe(obj, on, freed, freed) {
		GEM_BUG_ON(obj->bind_count);
		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));

		if (obj->ops->release)
			obj->ops->release(obj);

		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
			atomic_set(&obj->mm.pages_pin_count, 0);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		GEM_BUG_ON(obj->mm.pages);

		if (obj->base.import_attach)
			drm_prime_gem_destroy(&obj->base, NULL);

		reservation_object_fini(&obj->__builtin_resv);
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(i915, obj->base.size);

		kfree(obj->bit_17);
		i915_gem_object_free(obj);
	}
}

static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed;

	freed = llist_del_all(&i915->mm.free_list);
	if (unlikely(freed))
		__i915_gem_free_objects(i915, freed);
}

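/* Worker that drains mm.free_list; freeing is deferred to process context
 * because the final unbind may sleep (see __i915_gem_free_object_rcu() below).
 */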
static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);
	struct llist_node *freed;

	/* All file-owned VMA should have been released by this point through
	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * be unbound now.
	 */

	while ((freed = llist_del_all(&i915->mm.free_list)))
		__i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* We can't simply use call_rcu() from i915_gem_free_object()
	 * as we need to block whilst unbinding, and the call_rcu
	 * task may be called from softirq context. So we take a
	 * detour through a worker.
	 */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		schedule_work(&i915->mm.free_work);
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	if (obj->mm.quirked)
		__i915_gem_object_unpin_pages(obj);

	if (discard_backing_storage(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
	if (i915_gem_object_is_active(obj))
		i915_gem_object_set_active_reference(obj);
	else
		i915_gem_object_put(obj);
}

static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		GEM_BUG_ON(!i915_gem_context_is_kernel(engine->last_retired_context));
}

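/* Quiesce the GPU for suspend/hibernate: switch away from the user contexts,
 * wait for idle, flush the retire/idle/free workers, and (where known to be
 * safe) reset the GPU so it is left in a known default state.
 */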
int i915_gem_suspend(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	int ret;

	intel_suspend_gt_powersave(dev_priv);

	mutex_lock(&dev->struct_mutex);

	/* We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the last
	 * context image is coherent, we have to switch away from it. That
	 * leaves the dev_priv->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		goto err;

	ret = i915_gem_wait_for_idle(dev_priv,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev_priv);
	GEM_BUG_ON(dev_priv->gt.active_requests);

	assert_kernel_context_is_current(dev_priv);
	i915_gem_context_lost(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);

	/* As the idle_work is rearming if it detects a race, play safe and
	 * repeat the flush until it is definitely idle.
	 */
	while (flush_delayed_work(&dev_priv->gt.idle_work))
		;

	i915_gem_drain_freed_objects(dev_priv);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->gt.awake);
	WARN_ON(!intel_execlists_idle(dev_priv));

	/*
	 * Neither the BIOS, ourselves or any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that is of the upmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */
	if (HAS_HW_CONTEXTS(dev_priv)) {
		int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
		WARN_ON(reset && reset != -ENODEV);
	}

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

void i915_gem_resume(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;

	WARN_ON(dev_priv->gt.awake);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev_priv);

	/* As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	dev_priv->gt.resume(dev_priv);

	mutex_unlock(&dev->struct_mutex);
}

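/* Enable memory swizzle handling in the hardware (display arbiter, TILECTL
 * and per-gen ARB_MODE bits) when bit-6 swizzling is in use; no-op otherwise.
 */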
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

4323
	if (IS_GEN5(dev_priv))
4324 4325
		return;

4326
	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4327
	if (IS_GEN6(dev_priv))
4328
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4329
	else if (IS_GEN7(dev_priv))
4330
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4331
	else if (IS_GEN8(dev_priv))
B
Ben Widawsky 已提交
4332
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4333 4334
	else
		BUG();
4335
}
D
Daniel Vetter 已提交
4336

static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
{
	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
		init_unused_ring(dev_priv, SRB2_BASE);
		init_unused_ring(dev_priv, SRB3_BASE);
	} else if (IS_GEN2(dev_priv)) {
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
	} else if (IS_GEN3(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, PRB2_BASE);
	}
}

int
i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	dev_priv->gt.last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev_priv))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev_priv)) {
		if (IS_IVYBRIDGE(dev_priv)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_GEN(dev_priv) >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev_priv);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Makes sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev_priv);

	BUG_ON(!dev_priv->kernel_context);

	ret = i915_ppgtt_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* Need to do basic initialisation of all rings first: */
	for_each_engine(engine, dev_priv, id) {
		ret = engine->init_hw(engine);
		if (ret)
			goto out;
	}

	intel_mocs_init_l3cc_table(dev_priv);

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_guc_setup(dev_priv);
	if (ret)
		goto out;

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;
}

bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
{
	if (INTEL_INFO(dev_priv)->gen < 6)
		return false;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	if (value >= 0)
		return value;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

int i915_gem_init(struct drm_i915_private *dev_priv)
{
	int ret;

	mutex_lock(&dev_priv->drm.struct_mutex);

	if (!i915.enable_execlists) {
		dev_priv->gt.resume = intel_legacy_submission_resume;
		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
	} else {
		dev_priv->gt.resume = intel_lr_context_resume;
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
	}

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	i915_gem_init_userptr(dev_priv);

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret)
		goto out_unlock;

	ret = i915_gem_context_init(dev_priv);
	if (ret)
		goto out_unlock;

	ret = intel_engines_init(dev_priv);
	if (ret)
		goto out_unlock;

	ret = i915_gem_init_hw(dev_priv);
	if (ret == -EIO) {
		/* Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		i915_gem_set_wedged(dev_priv);
		ret = 0;
	}

out_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return ret;
}

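/* Tear down every engine via the submission-specific hook selected in
 * i915_gem_init() (legacy ringbuffer vs execlists).
 */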
void
i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		dev_priv->gt.cleanup_engine(engine);
}

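/* Work out how many fence registers this platform exposes (8, 16 or 32, or
 * the number granted by the host when running as a vGPU) and initialise them.
 */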
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	int i;

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev_priv)->gen >= 4 ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

		fence->i915 = dev_priv;
		fence->id = i;
		list_add_tail(&fence->link, &dev_priv->mm.fence_list);
	}
	i915_gem_restore_fences(dev_priv);

	i915_gem_detect_bit_6_swizzle(dev_priv);
}

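/* One-time GEM initialisation: create the slab caches, the global timeline
 * and the various lists and workers. Errors unwind in reverse order.
 */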
int
i915_gem_load_init(struct drm_i915_private *dev_priv)
{
	int err = -ENOMEM;

	dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->objects)
		goto err_out;

	dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->vmas)
		goto err_objects;

	dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
					SLAB_HWCACHE_ALIGN |
					SLAB_RECLAIM_ACCOUNT |
					SLAB_DESTROY_BY_RCU);
	if (!dev_priv->requests)
		goto err_vmas;

	dev_priv->dependencies = KMEM_CACHE(i915_dependency,
					    SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT);
	if (!dev_priv->dependencies)
		goto err_requests;

	mutex_lock(&dev_priv->drm.struct_mutex);
	INIT_LIST_HEAD(&dev_priv->gt.timelines);
	err = i915_gem_timeline_init__global(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	if (err)
		goto err_dependencies;

	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
	init_llist_head(&dev_priv->mm.free_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

	spin_lock_init(&dev_priv->fb_tracking.lock);

	return 0;

err_dependencies:
	kmem_cache_destroy(dev_priv->dependencies);
err_requests:
	kmem_cache_destroy(dev_priv->requests);
err_vmas:
	kmem_cache_destroy(dev_priv->vmas);
err_objects:
	kmem_cache_destroy(dev_priv->objects);
err_out:
	return err;
}

void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
{
	WARN_ON(!llist_empty(&dev_priv->mm.free_list));

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
	WARN_ON(!list_empty(&dev_priv->gt.timelines));
	mutex_unlock(&dev_priv->drm.struct_mutex);

	kmem_cache_destroy(dev_priv->dependencies);
	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();
}

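/* Early hibernation hook: shrink all GEM objects to minimise the amount of
 * memory that ends up in the hibernation image.
 */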
int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_shrink_all(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&dev_priv->mm.unbound_list,
		&dev_priv->mm.bound_list,
		NULL
	}, **p;

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well.
	 */

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);

	for (p = phases; *p; p++) {
		list_for_each_entry(obj, *p, global_link) {
			obj->base.read_domains = I915_GEM_DOMAIN_CPU;
			obj->base.write_domain = I915_GEM_DOMAIN_CPU;
		}
	}
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

	if (!list_empty(&file_priv->rps.link)) {
		spin_lock(&to_i915(dev)->rps.client_lock);
		list_del(&file_priv->rps.link);
		spin_unlock(&to_i915(dev)->rps.client_lock);
	}
}

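/* Per-client open: allocate the file_private used for request tracking, RPS
 * boosting and BSD engine selection, then set up the client's contexts.
 */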
int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = to_i915(dev);
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask are guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     sizeof(atomic_t) * BITS_PER_BYTE);

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
			         const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto fail;

	sg = obj->mm.pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->mm.dirty = true; /* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		unsigned long exception, i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		exception =
			RADIX_TREE_EXCEPTIONAL_ENTRY |
			idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i,
						(void *)exception);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radixtree will contain an exceptional entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(radix_tree_exception(sg))) {
		unsigned long base =
			(unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

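/* Return the DMA (bus) address of page @n within the object, using the same
 * sg iterator as i915_gem_object_get_page().
 */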
dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}