/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_mocs.h"
#include <linux/dma-fence-array.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);

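/* Reads are coherent through the CPU cache when the platform shares its
 * last-level cache with the GPU, or when the object is not uncached.
 */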
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
	return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
}

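/* Decide whether CPU writes to this object must be flushed with clflush
 * before the GPU or display engine can rely on the data. Writes tracked
 * in the CPU domain are flushed later, on the domain transition.
 */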
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

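/* Reserve a temporary node in the mappable range of the GGTT; used by the
 * pread/pwrite slow paths when the object cannot be pinned directly.
 */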
static int
insert_mappable_node(struct i915_ggtt *ggtt,
                     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
						   size, 0, -1,
						   0, ggtt->mappable_end,
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
				  u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
				     u64 size)
{
	spin_lock(&dev_priv->mm.object_stat_lock);
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
	spin_unlock(&dev_priv->mm.object_stat_lock);
}

static int
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
	int ret;

	might_sleep();

	if (!i915_reset_in_progress(error))
		return 0;

	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
	ret = wait_event_interruptible_timeout(error->reset_queue,
					       !i915_reset_in_progress(error),
					       I915_RESET_TIMEOUT);
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
		return ret;
	} else {
		return 0;
	}
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}

int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	size_t pinned;

	pinned = 0;
	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;
	mutex_unlock(&dev->struct_mutex);

	args->aper_size = ggtt->base.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

static struct sg_table *
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;

	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return ERR_PTR(-EINVAL);

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			return ERR_CAST(page);

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		vaddr += PAGE_SIZE;
	}

	i915_gem_chipset_flush(to_i915(obj->base.dev));

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

	return st;
}

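/* Common cleanup before an object's backing pages are released: forget the
 * dirty state of objects marked DONTNEED, clflush the pages if they may not
 * be coherent with the CPU cache, and return the object to the CPU domain.
 */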
static void
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
{
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;

	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		drm_clflush_sg(pages);

	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	__i915_gem_object_release_shmem(obj, pages);

	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
	i915_gem_object_unpin_pages(obj);
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

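/* Wait on a single dma_fence, optionally boosting the GPU frequency on
 * behalf of the stalled client (see the waitboost comment below). Returns
 * the remaining timeout, or an error code.
 */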
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout,
			   struct intel_rps_client *rps)
{
	struct drm_i915_gem_request *rq;

	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
	if (i915_gem_request_completed(rq))
		goto out;

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (rps) {
		if (INTEL_GEN(rq->i915) >= 6)
			gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
		else
			rps = NULL;
	}

	timeout = i915_wait_request(rq, flags, timeout);

out:
	if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
		i915_gem_request_retire_upto(rq);

	if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
		/* The GPU is now idle and this client has stalled.
		 * Since no other client has submitted a request in the
		 * meantime, assume that this client is the only one
		 * supplying work to the GPU but is unable to keep that
		 * work supplied because it is waiting. Since the GPU is
		 * then never kept fully busy, RPS autoclocking will
		 * keep the clocks relatively low, causing further delays.
		 * Compensate by giving the synchronous client credit for
		 * a waitboost next time.
		 */
		spin_lock(&rq->i915->rps.client_lock);
		list_del_init(&rps->link);
		spin_unlock(&rq->i915->rps.client_lock);
	}

	return timeout;
}

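/* Wait for the fences tracked by a reservation object: every shared fence
 * plus the exclusive fence when I915_WAIT_ALL is set, otherwise only the
 * exclusive fence. Returns the remaining timeout, or an error code.
 */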
static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout,
				 struct intel_rps_client *rps)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout,
							     rps);
			if (timeout <= 0)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(resv);
	}

	if (excl && timeout > 0)
		timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);

	dma_fence_put(excl);

	return timeout;
}

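/* Bump the scheduler priority of the request behind an i915 fence; a no-op
 * for foreign fences and for engines without a scheduler.
 */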
static void __fence_set_priority(struct dma_fence *fence, int prio)
{
	struct drm_i915_gem_request *rq;
	struct intel_engine_cs *engine;

	if (!dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;
	if (!engine->schedule)
		return;

	engine->schedule(rq, prio);
}

static void fence_set_priority(struct dma_fence *fence, int prio)
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			__fence_set_priority(array->fences[i], prio);
	} else {
		__fence_set_priority(fence, prio);
	}
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      int prio)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			fence_set_priority(shared[i], prio);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		fence_set_priority(excl, prio);
		dma_fence_put(excl);
	}
	return 0;
}

/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps: client (user process) to charge for any waitboosting
 */
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout,
		     struct intel_rps_client *rps)
{
	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);

	timeout = i915_gem_object_wait_reservation(obj->resv,
						   flags, timeout,
						   rps);
	return timeout < 0 ? timeout : 0;
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}

int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
	int ret;

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
			return -EBUSY;

		return 0;
	}

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

	ret = i915_gem_object_unbind(obj);
	if (ret)
		return ret;

	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
	if (obj->mm.pages)
		return -EBUSY;

	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
	obj->ops = &i915_gem_phys_ops;

	return i915_gem_object_pin_pages(obj);
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int ret;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
}

void *i915_gem_object_alloc(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	kmem_cache_free(dev_priv->objects, obj);
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_device *dev,
		uint64_t size,
		uint32_t *handle_p)
{
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	size = roundup(size, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create(dev, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
	args->size = args->pitch * args->height;
	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(to_i915(dev));

	return i915_gem_create(file, dev,
			       args->size, &args->handle);
}

static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

static inline int
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
				    unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		return ret;

	i915_gem_object_flush_gtt_write_domain(obj);

	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;

	/* Same trick applies to invalidate partially written cachelines read
	 * before writing.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
							 obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
		if (ret)
			goto err_unpin;

		*needs_clflush = 0;
	}

	if ((*needs_clflush & CLFLUSH_AFTER) == 0)
		obj->cache_dirty = true;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	obj->mm.dirty = true;
	/* return with the pages pinned */
	return 0;

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
}

static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
	if (unlikely(swizzled)) {
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
shmem_pread_slow(struct page *page, int offset, int length,
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);

	if (page_do_bit17_swizzling)
		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
	else
		ret = __copy_to_user(user_data, vaddr + offset, length);
	kunmap(page);

	return ret ? - EFAULT : 0;
}

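/* Per-page copy for the pread fastpath: try a non-faulting copy through an
 * atomic kmap first, and fall back to the kmap-based slow path (which also
 * handles bit17 swizzling) if that fails.
 */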
static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
	    bool page_do_bit17_swizzling, bool needs_clflush)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush)
			drm_clflush_virt_range(vaddr + offset, length);
		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return 0;

	return shmem_pread_slow(page, offset, length, user_data,
				page_do_bit17_swizzling, needs_clflush);
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	char __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	mutex_unlock(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pread(page, offset, length, user_data,
				  page_to_phys(page) & obj_do_bit17_swizzling,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

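/* Copy from a GGTT mapping to userspace: first through a non-faulting
 * atomic WC mapping, then retry through a full (faultable) mapping if the
 * copy could not complete.
 */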
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = (void __force *)
			io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data, vaddr + offset, length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
	struct i915_vma *vma;
	void __user *user_data;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

	mutex_unlock(&i915->drm.struct_mutex);

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	mutex_lock(&i915->drm.struct_mutex);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source.  */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = (void __force *)
			io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user(vaddr + offset, user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

	mutex_unlock(&i915->drm.struct_mutex);

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->mappable, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);

	mutex_lock(&i915->drm.struct_mutex);
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
				       node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
}

static int
shmem_pwrite_slow(struct page *page, int offset, int length,
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);
	if (page_do_bit17_swizzling)
		ret = __copy_from_user_swizzled(vaddr, offset, user_data,
						length);
	else
		ret = __copy_from_user(vaddr + offset, user_data, length);
	if (needs_clflush_after)
		shmem_clflush_swizzled_range(vaddr + offset, length,
					     page_do_bit17_swizzling);
	kunmap(page);

	return ret ? -EFAULT : 0;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool page_do_bit17_swizzling,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush_before)
			drm_clflush_virt_range(vaddr + offset, len);
		ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
		if (needs_clflush_after)
			drm_clflush_virt_range(vaddr + offset, len);

		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return ret;

	return shmem_pwrite_slow(page, offset, len, user_data,
				 page_do_bit17_swizzling,
				 needs_clflush_before,
				 needs_clflush_after);
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	void __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	mutex_unlock(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire page.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pwrite(page, offset, length, user_data,
				   page_to_phys(page) & obj_do_bit17_swizzling,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
		       u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

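/* Frontbuffer tracking origin for a write: GTT-domain writes report the
 * object's GGTT origin, anything else is treated as a CPU write.
 */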
static inline enum fb_op_origin
write_origin(struct drm_i915_gem_object *obj, unsigned domain)
{
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
}

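/* Move any idle, bound GGTT vmas of this object to the tail of the inactive
 * LRU so they become the last candidates for eviction, and move the object
 * itself to the tail of the bound/unbound list.
 */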
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915;
	struct list_head *list;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!i915_vma_is_ggtt(vma))
			continue;

		if (i915_vma_is_active(vma))
			continue;

		if (!drm_mm_node_allocated(&vma->node))
			continue;

		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}

	i915 = to_i915(obj->base.dev);
	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
	list_move_tail(&obj->global_link, list);
}

/**
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file)
{
	struct drm_i915_gem_set_domain *args = data;
	struct drm_i915_gem_object *obj;
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
	int err;

	/* Only handle setting domains to types used by the CPU. */
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   (write_domain ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (err)
		goto out;

	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out;

	err = i915_mutex_lock_interruptible(dev);
	if (err)
		goto out_unpin;

	if (read_domains & I915_GEM_DOMAIN_GTT)
		err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
	else
		err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);

	/* And bump the LRU for this access */
	i915_gem_object_bump_inactive_ggtt(obj);

	mutex_unlock(&dev->struct_mutex);

	if (write_domain != 0)
		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));

out_unpin:
	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return err;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;
	int err = 0;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Pinned buffers may be scanout, so flush the cache */
	if (READ_ONCE(obj->pin_display)) {
		err = i915_mutex_lock_interruptible(dev);
		if (!err) {
			i915_gem_object_flush_cpu_write_domain(obj);
			mutex_unlock(&dev->struct_mutex);
		}
	}

	i915_gem_object_put(obj);
	return err;
}

/**
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
 *
 * IMPORTANT:
 *
 * DRM driver writers who look at this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_mmap *args = data;
	struct drm_i915_gem_object *obj;
	unsigned long addr;

	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
		return -ENODEV;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
	if (!obj->base.filp) {
		i915_gem_object_put(obj);
		return -EINVAL;
	}

	addr = vm_mmap(obj->base.filp, 0, args->size,
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

		if (down_write_killable(&mm->mmap_sem)) {
			i915_gem_object_put(obj);
			return -EINTR;
		}
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);

		/* This may race, but that's ok, it only gets set */
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
	}
	i915_gem_object_put(obj);
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

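/* Number of pages covered by one tile row of the object: stride multiplied
 * by the tile height (32 rows for Y tiling, 8 for X).
 */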
static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
	u64 size;

	size = i915_gem_object_get_stride(obj);
	size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;

	return size >> PAGE_SHIFT;
}

/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     be aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though not
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 1;
}

/**
 * i915_gem_fault - fault a page into the GTT
 * @area: CPU VMA in question
 * @vmf: fault info
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
 *
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
 */
int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned int flags;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
		PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon acquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

1820 1821 1822 1823 1824
	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;
1825

1826
	/* Access to snoopable pages through the GTT is incoherent. */
1827
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
1828
		ret = -EFAULT;
1829
		goto err_unlock;
1830 1831
	}

1832 1833 1834 1835 1836 1837 1838 1839
	/* If the object is smaller than a couple of partial vma, it is
	 * not worth only creating a single partial vma - we may as well
	 * clear enough space for the full object.
	 */
	flags = PIN_MAPPABLE;
	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
		flags |= PIN_NONBLOCK | PIN_NONFAULT;

1840
	/* Now pin it into the GTT as needed */
1841
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
1842 1843
	if (IS_ERR(vma)) {
		struct i915_ggtt_view view;
1844 1845
		unsigned int chunk_size;

1846
		/* Use a partial view if it is bigger than available space */
1847 1848
		chunk_size = MIN_CHUNK_PAGES;
		if (i915_gem_object_is_tiled(obj))
1849
			chunk_size = roundup(chunk_size, tile_row_pages(obj));
1850

1851 1852 1853 1854
		memset(&view, 0, sizeof(view));
		view.type = I915_GGTT_VIEW_PARTIAL;
		view.params.partial.offset = rounddown(page_offset, chunk_size);
		view.params.partial.size =
1855
			min_t(unsigned int, chunk_size,
1856
			      vma_pages(area) - view.params.partial.offset);
1857

1858 1859 1860 1861 1862 1863
		/* If the partial covers the entire object, just create a
		 * normal VMA.
		 */
		if (chunk_size >= obj->base.size >> PAGE_SHIFT)
			view.type = I915_GGTT_VIEW_NORMAL;

1864 1865 1866 1867 1868
		/* Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */
		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;

1869 1870
		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	}
C
Chris Wilson 已提交
1871 1872
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
1873
		goto err_unlock;
C
Chris Wilson 已提交
1874
	}
1875

1876 1877
	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
1878
		goto err_unpin;
1879

1880
	ret = i915_vma_get_fence(vma);
1881
	if (ret)
1882
		goto err_unpin;
1883

1884
	/* Mark as being mmapped into userspace for later revocation */
1885
	assert_rpm_wakelock_held(dev_priv);
1886 1887 1888
	if (list_empty(&obj->userfault_link))
		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);

1889
	/* Finally, remap it using the new GTT offset */
1890 1891 1892 1893 1894
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
			       (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->mappable);
1895

1896
err_unpin:
C
Chris Wilson 已提交
1897
	__i915_vma_unpin(vma);
1898
err_unlock:
1899
	mutex_unlock(&dev->struct_mutex);
1900 1901
err_rpm:
	intel_runtime_pm_put(dev_priv);
1902
	i915_gem_object_unpin_pages(obj);
1903
err:
1904
	switch (ret) {
1905
	case -EIO:
1906 1907 1908 1909 1910 1911 1912
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
1913 1914 1915
			ret = VM_FAULT_SIGBUS;
			break;
		}
1916
	case -EAGAIN:
D
Daniel Vetter 已提交
1917 1918 1919 1920
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
1921
		 */
1922 1923
	case 0:
	case -ERESTARTSYS:
1924
	case -EINTR:
1925 1926 1927 1928 1929
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
1930 1931
		ret = VM_FAULT_NOPAGE;
		break;
1932
	case -ENOMEM:
1933 1934
		ret = VM_FAULT_OOM;
		break;
1935
	case -ENOSPC:
1936
	case -EFAULT:
1937 1938
		ret = VM_FAULT_SIGBUS;
		break;
1939
	default:
1940
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
1941 1942
		ret = VM_FAULT_SIGBUS;
		break;
1943
	}
1944
	return ret;
1945 1946
}

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	lockdep_assert_held(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (list_empty(&obj->userfault_link))
		goto out;

	list_del_init(&obj->userfault_link);
	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

out:
	intel_runtime_pm_put(i915);
}

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.userfault_list, userfault_link) {
		list_del_init(&obj->userfault_link);
		drm_vma_node_unmap(&obj->base.vma_node,
				   obj->base.dev->anon_inode->i_mapping);
	}

	/* The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		if (WARN_ON(reg->pin_count))
			continue;

		if (!reg->vma)
			continue;

		GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
		reg->dirty = true;
	}
}

/**
 * i915_gem_get_ggtt_size - return required global GTT size for an object
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 *
 * Return the required global GTT size for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
			   u64 size, int tiling_mode)
{
	u64 ggtt_size;

	GEM_BUG_ON(size == 0);

	if (INTEL_GEN(dev_priv) >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (IS_GEN3(dev_priv))
		ggtt_size = 1024*1024;
	else
		ggtt_size = 512*1024;

	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}

/**
 * i915_gem_get_ggtt_alignment - return required global GTT alignment
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 * @fenced: is fenced alignment required or not
 *
 * Return the required global GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
				int tiling_mode, bool fenced)
{
	GEM_BUG_ON(size == 0);

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_GEN(dev_priv) >= 4 || (!fenced && IS_G33(dev_priv)) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
}

static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int err;

	err = drm_gem_create_mmap_offset(&obj->base);
	if (!err)
		return 0;

	/* We can idle the GPU locklessly to flush stale objects, but in order
	 * to claim that space for ourselves, we need to take the big
	 * struct_mutex to free the requests+objects and allocate our slot.
	 */
	err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (!err) {
		i915_gem_retire_requests(dev_priv);
		err = drm_gem_create_mmap_offset(&obj->base);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return err;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret == 0)
		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

	i915_gem_object_put(obj);
	return ret;
}

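/*
 * Illustrative sketch (not part of the driver): a typical userspace consumer
 * asks for the fake offset via DRM_IOCTL_I915_GEM_MMAP_GTT and then passes
 * that offset to mmap() on the DRM fd, which routes subsequent faults through
 * i915_gem_fault() above. Error handling and surrounding setup are omitted.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 */
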
/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
}

/* Try to discard unwanted pages */
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(obj->mm.pages);

	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = obj->base.filp->f_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}

2213
static void
2214 2215
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
			      struct sg_table *pages)
2216
{
2217 2218
	struct sgt_iter sgt_iter;
	struct page *page;
2219

2220
	__i915_gem_object_release_shmem(obj, pages);
2221

2222
	i915_gem_gtt_finish_pages(obj, pages);
I
Imre Deak 已提交
2223

2224
	if (i915_gem_object_needs_bit17_swizzle(obj))
2225
		i915_gem_object_save_bit_17_swizzle(obj, pages);
2226

2227
	for_each_sgt_page(page, sgt_iter, pages) {
C
Chris Wilson 已提交
2228
		if (obj->mm.dirty)
2229
			set_page_dirty(page);
2230

C
Chris Wilson 已提交
2231
		if (obj->mm.madv == I915_MADV_WILLNEED)
2232
			mark_page_accessed(page);
2233

2234
		put_page(page);
2235
	}
C
Chris Wilson 已提交
2236
	obj->mm.dirty = false;
2237

2238 2239
	sg_free_table(pages);
	kfree(pages);
2240
}
C
Chris Wilson 已提交
2241

2242 2243 2244 2245 2246
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void **slot;

C
Chris Wilson 已提交
2247 2248
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2249 2250
}

2251 2252
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				 enum i915_mm_subclass subclass)
2253
{
2254
	struct sg_table *pages;
2255

C
Chris Wilson 已提交
2256
	if (i915_gem_object_has_pinned_pages(obj))
2257
		return;
2258

2259
	GEM_BUG_ON(obj->bind_count);
2260 2261 2262 2263
	if (!READ_ONCE(obj->mm.pages))
		return;

	/* May be called by shrinker from within get_pages() (on another bo) */
2264
	mutex_lock_nested(&obj->mm.lock, subclass);
2265 2266
	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
		goto unlock;
B
Ben Widawsky 已提交
2267

2268 2269 2270
	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
2271 2272
	pages = fetch_and_zero(&obj->mm.pages);
	GEM_BUG_ON(!pages);
2273

C
Chris Wilson 已提交
2274
	if (obj->mm.mapping) {
2275 2276
		void *ptr;

C
Chris Wilson 已提交
2277
		ptr = ptr_mask_bits(obj->mm.mapping);
2278 2279
		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
2280
		else
2281 2282
			kunmap(kmap_to_page(ptr));

C
Chris Wilson 已提交
2283
		obj->mm.mapping = NULL;
2284 2285
	}

2286 2287
	__i915_gem_object_reset_page_iter(obj);

2288
	obj->ops->put_pages(obj, pages);
2289 2290
unlock:
	mutex_unlock(&obj->mm.lock);
}

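/* Limit the size of each scatterlist segment so that it can still be mapped
 * when swiotlb is in use; a return value of 0 means no limit applies.
 */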
static unsigned int swiotlb_max_size(void)
{
#if IS_ENABLED(CONFIG_SWIOTLB)
	return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
#else
	return 0;
#endif
}

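/* Trim the sg_table down to the entries actually used: the table is
 * over-allocated for the worst case of one page per entry, so repack the
 * populated entries into a right-sized table and free the original.
 */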
static void i915_sg_trim(struct sg_table *orig_st)
{
	struct sg_table new_st;
	struct scatterlist *sg, *new_sg;
	unsigned int i;

	if (orig_st->nents == orig_st->orig_nents)
		return;

	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
		return;

	new_sg = new_st.sgl;
	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
		/* called before being DMA mapped, no need to copy sg->dma_* */
		new_sg = sg_next(new_sg);
	}

	sg_free_table(orig_st);

	*orig_st = new_st;
}

2326
static struct sg_table *
C
Chris Wilson 已提交
2327
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2328
{
2329
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2330 2331
	int page_count, i;
	struct address_space *mapping;
2332 2333
	struct sg_table *st;
	struct scatterlist *sg;
2334
	struct sgt_iter sgt_iter;
2335
	struct page *page;
2336
	unsigned long last_pfn = 0;	/* suppress gcc warning */
2337
	unsigned int max_segment;
I
Imre Deak 已提交
2338
	int ret;
C
Chris Wilson 已提交
2339
	gfp_t gfp;
2340

C
Chris Wilson 已提交
2341 2342 2343 2344
	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
2345 2346
	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
C
Chris Wilson 已提交
2347

2348 2349
	max_segment = swiotlb_max_size();
	if (!max_segment)
2350
		max_segment = rounddown(UINT_MAX, PAGE_SIZE);
2351

2352 2353
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
2354
		return ERR_PTR(-ENOMEM);
2355

2356
	page_count = obj->base.size / PAGE_SIZE;
2357 2358
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
2359
		return ERR_PTR(-ENOMEM);
2360
	}
2361

2362 2363 2364 2365 2366
	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
2367
	mapping = obj->base.filp->f_mapping;
2368
	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2369
	gfp |= __GFP_NORETRY | __GFP_NOWARN;
2370 2371 2372
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
C
Chris Wilson 已提交
2373 2374
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
2375 2376 2377 2378 2379
			i915_gem_shrink(dev_priv,
					page_count,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
C
Chris Wilson 已提交
2380 2381 2382 2383 2384 2385 2386
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
2387
			page = shmem_read_mapping_page(mapping, i);
I
Imre Deak 已提交
2388 2389
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
2390
				goto err_sg;
I
Imre Deak 已提交
2391
			}
C
Chris Wilson 已提交
2392
		}
2393 2394 2395
		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
2396 2397 2398 2399 2400 2401 2402 2403
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);
2404 2405 2406

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2407
	}
2408
	if (sg) /* loop terminated early; short sg table */
2409
		sg_mark_end(sg);
2410

2411 2412 2413
	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

2414
	ret = i915_gem_gtt_prepare_pages(obj, st);
I
Imre Deak 已提交
2415 2416 2417
	if (ret)
		goto err_pages;

2418
	if (i915_gem_object_needs_bit17_swizzle(obj))
2419
		i915_gem_object_do_bit_17_swizzle(obj, st);
2420

2421
	return st;
2422

2423
err_sg:
2424
	sg_mark_end(sg);
2425
err_pages:
2426 2427
	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);
2428 2429
	sg_free_table(st);
	kfree(st);
2430 2431 2432 2433 2434 2435 2436 2437 2438

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
I
Imre Deak 已提交
2439 2440 2441
	if (ret == -ENOSPC)
		ret = -ENOMEM;

2442 2443 2444 2445 2446 2447
	return ERR_PTR(ret);
}

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
2448
	lockdep_assert_held(&obj->mm.lock);
2449 2450 2451 2452 2453

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;
2454 2455 2456 2457 2458 2459 2460

	if (i915_gem_object_is_tiled(obj) &&
	    to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}
2461 2462 2463 2464 2465 2466
}

static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

2467 2468
	GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

2469 2470 2471 2472 2473 2474 2475 2476 2477 2478 2479
	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	pages = obj->ops->get_pages(obj);
	if (unlikely(IS_ERR(pages)))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages);
	return 0;
2480 2481
}

2482
/* Ensure that the associated pages are gathered from the backing storage
2483
 * and pinned into our object. i915_gem_object_pin_pages() may be called
2484
 * multiple times before they are released by a single call to
2485
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
2486 2487 2488
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
C
Chris Wilson 已提交
2489
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2490
{
2491
	int err;
2492

2493 2494 2495
	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return err;
2496

2497 2498 2499 2500
	if (unlikely(!obj->mm.pages)) {
		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;
2501

2502 2503 2504
		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);
2505

2506 2507
unlock:
	mutex_unlock(&obj->mm.lock);
2508
	return err;
2509 2510
}

2511
/* The 'mapping' part of i915_gem_object_pin_map() below */
2512 2513
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
2514 2515
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
C
Chris Wilson 已提交
2516
	struct sg_table *sgt = obj->mm.pages;
2517 2518
	struct sgt_iter sgt_iter;
	struct page *page;
2519 2520
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
2521
	unsigned long i = 0;
2522
	pgprot_t pgprot;
2523 2524 2525
	void *addr;

	/* A single page can always be kmapped */
2526
	if (n_pages == 1 && type == I915_MAP_WB)
2527 2528
		return kmap(sg_page(sgt->sgl));

2529 2530 2531 2532 2533 2534
	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
		if (!pages)
			return NULL;
	}
2535

2536 2537
	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;
2538 2539 2540 2541

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

2542 2543 2544 2545 2546 2547 2548 2549 2550
	switch (type) {
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);
2551

2552 2553
	if (pages != stack_pages)
		drm_free_large(pages);
2554 2555 2556 2557 2558

	return addr;
}

/* get, pin, and map the pages of the object into kernel space */
2559 2560
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
2561
{
2562 2563 2564
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
2565 2566
	int ret;

2567
	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
2568

2569
	ret = mutex_lock_interruptible(&obj->mm.lock);
2570 2571 2572
	if (ret)
		return ERR_PTR(ret);

2573 2574
	pinned = true;
	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2575 2576 2577 2578
		if (unlikely(!obj->mm.pages)) {
			ret = ____i915_gem_object_get_pages(obj);
			if (ret)
				goto err_unlock;
2579

2580 2581 2582
			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
2583 2584 2585
		pinned = false;
	}
	GEM_BUG_ON(!obj->mm.pages);
2586

C
Chris Wilson 已提交
2587
	ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
2588 2589 2590
	if (ptr && has_type != type) {
		if (pinned) {
			ret = -EBUSY;
2591
			goto err_unpin;
2592
		}
2593 2594 2595 2596 2597 2598

		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

C
Chris Wilson 已提交
2599
		ptr = obj->mm.mapping = NULL;
2600 2601
	}

2602 2603 2604 2605
	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			ret = -ENOMEM;
2606
			goto err_unpin;
2607 2608
		}

C
Chris Wilson 已提交
2609
		obj->mm.mapping = ptr_pack_bits(ptr, type);
2610 2611
	}

2612 2613
out_unlock:
	mutex_unlock(&obj->mm.lock);
2614 2615
	return ptr;

2616 2617 2618 2619 2620
err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(ret);
	goto out_unlock;
2621 2622
}

2623
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
2624
{
2625
	unsigned long elapsed;
2626

2627
	if (ctx->hang_stats.banned)
2628 2629
		return true;

2630
	elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
2631 2632
	if (ctx->hang_stats.ban_period_seconds &&
	    elapsed <= ctx->hang_stats.ban_period_seconds) {
2633 2634
		DRM_DEBUG("context hanging too fast, banning!\n");
		return true;
2635 2636 2637 2638 2639
	}

	return false;
}

2640
static void i915_set_reset_status(struct i915_gem_context *ctx,
2641
				  const bool guilty)
2642
{
2643
	struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
2644 2645

	if (guilty) {
2646
		hs->banned = i915_context_is_banned(ctx);
2647 2648 2649 2650
		hs->batch_active++;
		hs->guilty_ts = get_seconds();
	} else {
		hs->batch_pending++;
2651 2652 2653
	}
}

2654
struct drm_i915_gem_request *
2655
i915_gem_find_active_request(struct intel_engine_cs *engine)
2656
{
2657 2658
	struct drm_i915_gem_request *request;

2659 2660 2661 2662 2663 2664 2665 2666
	/* We are called by the error capture and reset at a random
	 * point in time. In particular, note that neither is crucially
	 * ordered with an interrupt. After a hang, the GPU is dead and we
	 * assume that no more writes can happen (we waited long enough for
	 * all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 */
2667
	list_for_each_entry(request, &engine->timeline->requests, link) {
C
Chris Wilson 已提交
2668
		if (__i915_gem_request_completed(request))
2669
			continue;
2670

2671
		return request;
2672
	}

	return NULL;
}

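/* Clear out the user payload of a request that will not be re-run after a
 * reset, keeping only the breadcrumb at the end so waiters still see the
 * fence signal.
 */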
static void reset_request(struct drm_i915_gem_request *request)
{
	void *vaddr = request->ring->vaddr;
	u32 head;

	/* As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = request->head;
	if (request->postfix < head) {
		memset(vaddr + head, 0, request->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, request->postfix - head);
}

static void i915_gem_reset_engine(struct intel_engine_cs *engine)
2695 2696
{
	struct drm_i915_gem_request *request;
2697
	struct i915_gem_context *incomplete_ctx;
C
Chris Wilson 已提交
2698
	struct intel_timeline *timeline;
2699 2700
	bool ring_hung;

2701 2702 2703
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);

2704
	request = i915_gem_find_active_request(engine);
2705
	if (!request)
2706 2707
		return;

2708 2709 2710 2711 2712
	ring_hung = engine->hangcheck.stalled;
	if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
		DRM_DEBUG_DRIVER("%s pardoned, was guilty? %s\n",
				 engine->name,
				 yesno(ring_hung));
2713
		ring_hung = false;
2714
	}
2715

2716
	i915_set_reset_status(request->ctx, ring_hung);
2717 2718 2719 2720
	if (!ring_hung)
		return;

	DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
2721
			 engine->name, request->global_seqno);
2722 2723 2724 2725 2726 2727 2728 2729 2730 2731 2732 2733 2734 2735 2736 2737

	/* Setup the CS to resume from the breadcrumb of the hung request */
	engine->reset_hw(engine, request);

	/* Users of the default context do not rely on logical state
	 * preserved between batches. They have to emit full state on
	 * every batch and so it is safe to execute queued requests following
	 * the hang.
	 *
	 * Other contexts preserve state, now corrupt. We want to skip all
	 * queued requests that reference the corrupt context.
	 */
	incomplete_ctx = request->ctx;
	if (i915_gem_context_is_default(incomplete_ctx))
		return;

2738
	list_for_each_entry_continue(request, &engine->timeline->requests, link)
2739 2740
		if (request->ctx == incomplete_ctx)
			reset_request(request);
C
Chris Wilson 已提交
2741 2742 2743 2744

	timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
	list_for_each_entry(request, &timeline->requests, link)
		reset_request(request);
2745
}
2746

2747
void i915_gem_reset(struct drm_i915_private *dev_priv)
2748
{
2749
	struct intel_engine_cs *engine;
2750
	enum intel_engine_id id;
2751

2752 2753
	lockdep_assert_held(&dev_priv->drm.struct_mutex);

2754 2755
	i915_gem_retire_requests(dev_priv);

2756
	for_each_engine(engine, dev_priv, id)
2757 2758
		i915_gem_reset_engine(engine);

2759
	i915_gem_restore_fences(dev_priv);
2760 2761 2762 2763 2764 2765 2766

	if (dev_priv->gt.awake) {
		intel_sanitize_gt_powersave(dev_priv);
		intel_enable_gt_powersave(dev_priv);
		if (INTEL_GEN(dev_priv) >= 6)
			gen6_rps_busy(dev_priv);
	}
}

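/* Once the GPU is declared wedged we stop talking to the hardware: new
 * requests are swallowed by nop_submit_request() and any outstanding work is
 * marked complete by i915_gem_cleanup_engine() below.
 */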
static void nop_submit_request(struct drm_i915_gem_request *request)
{
}

static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
{
	engine->submit_request = nop_submit_request;
2776

2777 2778 2779 2780
	/* Mark all pending requests as complete so that any concurrent
	 * (lockless) lookup doesn't try and wait upon the request as we
	 * reset it.
	 */
2781
	intel_engine_init_global_seqno(engine,
2782
				       intel_engine_last_submit(engine));
2783

2784 2785 2786 2787 2788 2789
	/*
	 * Clear the execlists queue up before freeing the requests, as those
	 * are the ones that keep the context and ringbuffer backing objects
	 * pinned in place.
	 */

2790
	if (i915.enable_execlists) {
2791 2792 2793 2794
		unsigned long flags;

		spin_lock_irqsave(&engine->timeline->lock, flags);

2795 2796 2797
		i915_gem_request_put(engine->execlist_port[0].request);
		i915_gem_request_put(engine->execlist_port[1].request);
		memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
2798 2799
		engine->execlist_queue = RB_ROOT;
		engine->execlist_first = NULL;
2800 2801

		spin_unlock_irqrestore(&engine->timeline->lock, flags);
2802
	}
2803 2804
}

2805
void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
2806
{
2807
	struct intel_engine_cs *engine;
2808
	enum intel_engine_id id;
2809

2810 2811
	lockdep_assert_held(&dev_priv->drm.struct_mutex);
	set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
2812

2813
	i915_gem_context_lost(dev_priv);
2814
	for_each_engine(engine, dev_priv, id)
2815
		i915_gem_cleanup_engine(engine);
2816
	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2817

2818
	i915_gem_retire_requests(dev_priv);
2819 2820
}

2821
static void
2822 2823
i915_gem_retire_work_handler(struct work_struct *work)
{
2824
	struct drm_i915_private *dev_priv =
2825
		container_of(work, typeof(*dev_priv), gt.retire_work.work);
2826
	struct drm_device *dev = &dev_priv->drm;
2827

2828
	/* Come back later if the device is busy... */
2829
	if (mutex_trylock(&dev->struct_mutex)) {
2830
		i915_gem_retire_requests(dev_priv);
2831
		mutex_unlock(&dev->struct_mutex);
2832
	}
2833 2834 2835 2836 2837

	/* Keep the retire handler running until we are finally idle.
	 * We do not need to do this test under locking as in the worst-case
	 * we queue the retire worker once too often.
	 */
2838 2839
	if (READ_ONCE(dev_priv->gt.awake)) {
		i915_queue_hangcheck(dev_priv);
2840 2841
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.retire_work,
2842
				   round_jiffies_up_relative(HZ));
2843
	}
2844
}
2845

2846 2847 2848 2849
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
2850
		container_of(work, typeof(*dev_priv), gt.idle_work.work);
2851
	struct drm_device *dev = &dev_priv->drm;
2852
	struct intel_engine_cs *engine;
2853
	enum intel_engine_id id;
2854 2855 2856 2857 2858
	bool rearm_hangcheck;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

2859 2860 2861 2862 2863 2864 2865
	/*
	 * Wait for last execlists context complete, but bail out in case a
	 * new request is submitted.
	 */
	wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
		 intel_execlists_idle(dev_priv), 10);

2866
	if (READ_ONCE(dev_priv->gt.active_requests))
2867 2868 2869 2870 2871 2872 2873 2874 2875 2876 2877 2878 2879
		return;

	rearm_hangcheck =
		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	if (!mutex_trylock(&dev->struct_mutex)) {
		/* Currently busy, come back later */
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->gt.idle_work,
				 msecs_to_jiffies(50));
		goto out_rearm;
	}

2880 2881 2882 2883 2884 2885 2886
	/*
	 * New request retired after this work handler started, extend active
	 * period until next instance of the work.
	 */
	if (work_pending(work))
		goto out_unlock;

2887
	if (dev_priv->gt.active_requests)
2888
		goto out_unlock;
2889

2890 2891 2892
	if (wait_for(intel_execlists_idle(dev_priv), 10))
		DRM_ERROR("Timeout waiting for engines to idle\n");

2893
	for_each_engine(engine, dev_priv, id)
2894
		i915_gem_batch_pool_fini(&engine->batch_pool);
2895

2896 2897 2898
	GEM_BUG_ON(!dev_priv->gt.awake);
	dev_priv->gt.awake = false;
	rearm_hangcheck = false;
2899

2900 2901 2902 2903 2904
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_idle(dev_priv);
	intel_runtime_pm_put(dev_priv);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
2905

2906 2907 2908 2909
out_rearm:
	if (rearm_hangcheck) {
		GEM_BUG_ON(!dev_priv->gt.awake);
		i915_queue_hangcheck(dev_priv);
2910
	}
2911 2912
}

2913 2914 2915 2916 2917 2918 2919 2920 2921 2922
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_vma *vma, *vn;

	mutex_lock(&obj->base.dev->struct_mutex);
	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
		if (vma->vm->file == fpriv)
			i915_vma_close(vma);
2923 2924 2925 2926 2927 2928

	if (i915_gem_object_is_active(obj) &&
	    !i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_set_active_reference(obj);
		i915_gem_object_get(obj);
	}
	mutex_unlock(&obj->base.dev->struct_mutex);
}

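/* Convert the user supplied timeout (in nanoseconds) into jiffies; a negative
 * value means "wait forever" and zero means "do not wait at all".
 */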
static unsigned long to_wait_timeout(s64 timeout_ns)
{
	if (timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;

	if (timeout_ns == 0)
		return 0;

	return nsecs_to_jiffies_timeout(timeout_ns);
}

/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENOMEM: out of memory
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the busy
 * ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
2972 2973
	ktime_t start;
	long ret;
2974

2975 2976 2977
	if (args->flags != 0)
		return -EINVAL;

2978
	obj = i915_gem_object_lookup(file, args->bo_handle);
2979
	if (!obj)
2980 2981
		return -ENOENT;

2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992
	start = ktime_get();

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
				   to_wait_timeout(args->timeout_ns),
				   to_rps_client(file));

	if (args->timeout_ns > 0) {
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;
2993 2994
	}

C
Chris Wilson 已提交
2995
	i915_gem_object_put(obj);
2996
	return ret;
}

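/* Wait for the last request submitted on each engine of this timeline to
 * complete, honouring the caller's wait flags (e.g. I915_WAIT_INTERRUPTIBLE,
 * I915_WAIT_LOCKED).
 */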
static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
3000
{
3001
	int ret, i;
3002

3003 3004 3005 3006 3007
	for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
		ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
		if (ret)
			return ret;
	}
3008

3009 3010 3011 3012 3013 3014 3015
	return 0;
}

int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
	int ret;

3016 3017 3018 3019 3020 3021 3022 3023 3024 3025 3026 3027
	if (flags & I915_WAIT_LOCKED) {
		struct i915_gem_timeline *tl;

		lockdep_assert_held(&i915->drm.struct_mutex);

		list_for_each_entry(tl, &i915->gt.timelines, link) {
			ret = wait_for_timeline(tl, flags);
			if (ret)
				return ret;
		}
	} else {
		ret = wait_for_timeline(&i915->gt.global_timeline, flags);
3028 3029 3030
		if (ret)
			return ret;
	}
3031

3032
	return 0;
3033 3034
}

3035 3036
void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     bool force)
3037 3038 3039 3040 3041
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
C
Chris Wilson 已提交
3042
	if (!obj->mm.pages)
3043
		return;
3044

3045 3046 3047 3048
	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
3049
	if (obj->stolen || obj->phys_handle)
3050
		return;
3051

3052 3053 3054 3055 3056 3057 3058 3059
	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
3060 3061
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
		obj->cache_dirty = true;
3062
		return;
3063
	}
3064

C
Chris Wilson 已提交
3065
	trace_i915_gem_object_clflush(obj);
C
Chris Wilson 已提交
3066
	drm_clflush_sg(obj->mm.pages);
3067
	obj->cache_dirty = false;
3068 3069 3070 3071
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
3072
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3073
{
3074
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
C
Chris Wilson 已提交
3075

3076
	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3077 3078
		return;

3079
	/* No actual flushing is required for the GTT write domain.  Writes
3080
	 * to it "immediately" go to main memory as far as we know, so there's
3081
	 * no chipset flush.  It also doesn't land in render cache.
3082 3083 3084 3085
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
3086 3087 3088 3089 3090 3091 3092
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour).
3093
	 */
3094
	wmb();
3095
	if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
3096
		POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
3097

3098
	intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
3099

3100
	obj->base.write_domain = 0;
C
Chris Wilson 已提交
3101
	trace_i915_gem_object_change_domain(obj,
3102
					    obj->base.read_domains,
3103
					    I915_GEM_DOMAIN_GTT);
3104 3105 3106 3107
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
3108
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3109
{
3110
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3111 3112
		return;

3113
	i915_gem_clflush_object(obj, obj->pin_display);
3114
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3115

3116
	obj->base.write_domain = 0;
C
Chris Wilson 已提交
3117
	trace_i915_gem_object_change_domain(obj,
3118
					    obj->base.read_domains,
3119
					    I915_GEM_DOMAIN_CPU);
3120 3121
}

/**
 * Moves a single object to the GTT read, and possibly write domain.
 * @obj: object to act on
 * @write: ask for write access or read only
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
3131
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3132
{
C
Chris Wilson 已提交
3133
	uint32_t old_write_domain, old_read_domains;
3134
	int ret;
3135

3136
	lockdep_assert_held(&obj->base.dev->struct_mutex);
3137

3138 3139 3140 3141 3142 3143
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
3144 3145 3146
	if (ret)
		return ret;

3147 3148 3149
	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

3150 3151 3152 3153 3154 3155 3156 3157
	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
C
Chris Wilson 已提交
3158
	ret = i915_gem_object_pin_pages(obj);
3159 3160 3161
	if (ret)
		return ret;

3162
	i915_gem_object_flush_cpu_write_domain(obj);
C
Chris Wilson 已提交
3163

3164 3165 3166 3167 3168 3169 3170
	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

3171 3172
	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;
C
Chris Wilson 已提交
3173

3174 3175 3176
	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
3177
	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3178
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3179
	if (write) {
3180 3181
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
C
Chris Wilson 已提交
3182
		obj->mm.dirty = true;
3183 3184
	}

C
Chris Wilson 已提交
3185 3186 3187 3188
	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

C
Chris Wilson 已提交
3189
	i915_gem_object_unpin_pages(obj);
3190 3191 3192
	return 0;
}

/**
 * Changes the cache-level of an object across all VMA.
 * @obj: object to act on
 * @cache_level: new cache level to set for the object
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
3211
	struct i915_vma *vma;
3212
	int ret;
3213

3214 3215
	lockdep_assert_held(&obj->base.dev->struct_mutex);

3216
	if (obj->cache_level == cache_level)
3217
		return 0;
3218

3219 3220 3221 3222 3223
	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
3224 3225
restart:
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
3226 3227 3228
		if (!drm_mm_node_allocated(&vma->node))
			continue;

3229
		if (i915_vma_is_pinned(vma)) {
3230 3231 3232 3233
			DRM_DEBUG("can not change the cache level of pinned objects\n");
			return -EBUSY;
		}

3234 3235 3236 3237 3238 3239 3240 3241 3242 3243 3244 3245
		if (i915_gem_valid_gtt_space(vma, cache_level))
			continue;

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;

		/* As unbinding may affect other elements in the
		 * obj->vma_list (due to side-effects from retiring
		 * an active vma), play safe and restart the iterator.
		 */
		goto restart;
3246 3247
	}

3248 3249 3250 3251 3252 3253 3254
	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However since
	 * we already have a valid slot, dma mapping, pages etc, we may as
	 * rewrite the PTE in the belief that doing so tramples upon less
	 * state and so involves less work.
	 */
3255
	if (obj->bind_count) {
3256 3257 3258 3259
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
		 */
3260 3261 3262 3263 3264 3265
		ret = i915_gem_object_wait(obj,
					   I915_WAIT_INTERRUPTIBLE |
					   I915_WAIT_LOCKED |
					   I915_WAIT_ALL,
					   MAX_SCHEDULE_TIMEOUT,
					   NULL);
3266 3267 3268
		if (ret)
			return ret;

3269 3270
		if (!HAS_LLC(to_i915(obj->base.dev)) &&
		    cache_level != I915_CACHE_NONE) {
3271 3272 3273 3274 3275 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmaping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
			i915_gem_release_mmap(obj);

			/* As we no longer need a fence for GTT access,
			 * we can relinquish it now (and so prevent having
			 * to steal a fence from someone else on the next
			 * fence request). Note GPU activity would have
			 * dropped the fence as all snoopable access is
			 * supposed to be linear.
			 */
3287 3288 3289 3290 3291
			list_for_each_entry(vma, &obj->vma_list, obj_link) {
				ret = i915_vma_put_fence(vma);
				if (ret)
					return ret;
			}
3292 3293 3294 3295 3296 3297 3298 3299
		} else {
			/* We either have incoherent backing store and
			 * so no GTT access or the architecture is fully
			 * coherent. In such cases, existing GTT mmaps
			 * ignore the cache bit in the PTE and we can
			 * rewrite it without confusing the GPU or having
			 * to force userspace to fault back in its mmaps.
			 */
3300 3301
		}

3302
		list_for_each_entry(vma, &obj->vma_list, obj_link) {
3303 3304 3305 3306 3307 3308 3309
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
			if (ret)
				return ret;
		}
3310 3311
	}

3312 3313 3314 3315
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
	    cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		obj->cache_dirty = true;

3316
	list_for_each_entry(vma, &obj->vma_list, obj_link)
3317 3318 3319
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

3320 3321 3322
	return 0;
}

B
Ben Widawsky 已提交
3323 3324
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
3325
{
B
Ben Widawsky 已提交
3326
	struct drm_i915_gem_caching *args = data;
3327
	struct drm_i915_gem_object *obj;
3328
	int err = 0;
3329

3330 3331 3332 3333 3334 3335
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj) {
		err = -ENOENT;
		goto out;
	}
3336

3337 3338 3339 3340 3341 3342
	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

3343 3344 3345 3346
	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

3347 3348 3349 3350
	default:
		args->caching = I915_CACHING_NONE;
		break;
	}
3351 3352 3353
out:
	rcu_read_unlock();
	return err;
3354 3355
}

B
Ben Widawsky 已提交
3356 3357
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
3358
{
3359
	struct drm_i915_private *i915 = to_i915(dev);
B
Ben Widawsky 已提交
3360
	struct drm_i915_gem_caching *args = data;
3361 3362 3363 3364
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

B
Ben Widawsky 已提交
3365 3366
	switch (args->caching) {
	case I915_CACHING_NONE:
3367 3368
		level = I915_CACHE_NONE;
		break;
B
Ben Widawsky 已提交
3369
	case I915_CACHING_CACHED:
3370 3371 3372 3373 3374 3375
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
3376
		if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3377 3378
			return -ENODEV;

3379 3380
		level = I915_CACHE_LLC;
		break;
3381
	case I915_CACHING_DISPLAY:
3382
		level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3383
		break;
3384 3385 3386 3387
	default:
		return -EINVAL;
	}

B
Ben Widawsky 已提交
3388 3389
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
3390
		return ret;
B
Ben Widawsky 已提交
3391

3392 3393
	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
3394 3395 3396 3397 3398
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);
3399
	i915_gem_object_put(obj);
3400 3401 3402 3403 3404
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/*
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
 */
struct i915_vma *
3411 3412
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
3413
				     const struct i915_ggtt_view *view)
3414
{
C
Chris Wilson 已提交
3415
	struct i915_vma *vma;
3416
	u32 old_read_domains, old_write_domain;
3417 3418
	int ret;

3419 3420
	lockdep_assert_held(&obj->base.dev->struct_mutex);

3421 3422 3423
	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
3424
	obj->pin_display++;
3425

3426 3427 3428 3429 3430 3431 3432 3433 3434
	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
3435
	ret = i915_gem_object_set_cache_level(obj,
3436 3437
					      HAS_WT(to_i915(obj->base.dev)) ?
					      I915_CACHE_WT : I915_CACHE_NONE);
C
Chris Wilson 已提交
3438 3439
	if (ret) {
		vma = ERR_PTR(ret);
3440
		goto err_unpin_display;
C
Chris Wilson 已提交
3441
	}
3442

3443 3444
	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
3445 3446 3447 3448
	 * always use map_and_fenceable for all scanout buffers. However,
	 * it may simply be too big to fit into mappable, in which case
	 * put it anyway and hope that userspace can cope (but always first
	 * try to preserve the existing ABI).
3449
	 */
3450 3451 3452 3453
	vma = ERR_PTR(-ENOSPC);
	if (view->type == I915_GGTT_VIEW_NORMAL)
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
					       PIN_MAPPABLE | PIN_NONBLOCK);
3454 3455 3456 3457 3458 3459 3460 3461 3462 3463 3464 3465 3466 3467 3468 3469
	if (IS_ERR(vma)) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		unsigned int flags;

		/* Valleyview is definitely limited to scanning out the first
		 * 512MiB. Lets presume this behaviour was inherited from the
		 * g4x display engine and that all earlier gen are similarly
		 * limited. Testing suggests that it is a little more
		 * complicated than this. For example, Cherryview appears quite
		 * happy to scanout from anywhere within its global aperture.
		 */
		flags = 0;
		if (HAS_GMCH_DISPLAY(i915))
			flags = PIN_MAPPABLE;
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
	}
C
Chris Wilson 已提交
3470
	if (IS_ERR(vma))
3471
		goto err_unpin_display;
3472

3473 3474
	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

3475 3476 3477 3478 3479
	/* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
	if (obj->cache_dirty) {
		i915_gem_clflush_object(obj, true);
		intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
	}
3480

3481
	old_write_domain = obj->base.write_domain;
3482
	old_read_domains = obj->base.read_domains;
3483 3484 3485 3486

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
3487
	obj->base.write_domain = 0;
3488
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3489 3490 3491

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
3492
					    old_write_domain);
3493

C
Chris Wilson 已提交
3494
	return vma;
3495 3496

err_unpin_display:
3497
	obj->pin_display--;
C
Chris Wilson 已提交
3498
	return vma;
3499 3500 3501
}

void
C
Chris Wilson 已提交
3502
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
3503
{
3504 3505
	lockdep_assert_held(&vma->vm->dev->struct_mutex);

C
Chris Wilson 已提交
3506
	if (WARN_ON(vma->obj->pin_display == 0))
3507 3508
		return;

3509 3510
	if (--vma->obj->pin_display == 0)
		vma->display_alignment = 0;
3511

3512 3513 3514 3515
	/* Bump the LRU to try and avoid premature eviction whilst flipping  */
	if (!i915_vma_is_active(vma))
		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

C
Chris Wilson 已提交
3516
	i915_vma_unpin(vma);
3517 3518
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 * @obj: object to act on
 * @write: requesting write or read-only access
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
3528
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
3529
{
C
Chris Wilson 已提交
3530
	uint32_t old_write_domain, old_read_domains;
3531 3532
	int ret;

3533
	lockdep_assert_held(&obj->base.dev->struct_mutex);
3534

3535 3536 3537 3538 3539 3540
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
3541 3542 3543
	if (ret)
		return ret;

3544 3545 3546
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

3547
	i915_gem_object_flush_gtt_write_domain(obj);
3548

3549 3550
	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;
C
Chris Wilson 已提交
3551

3552
	/* Flush the CPU cache if it's still invalid. */
3553
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
3554
		i915_gem_clflush_object(obj, false);
3555

3556
		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
3557 3558 3559 3560 3561
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
3562
	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
3563 3564 3565 3566 3567

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
3568 3569
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3570
	}
3571

C
Chris Wilson 已提交
3572 3573 3574 3575
	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

3576 3577 3578
	return 0;
}
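/* Added usage note (assumption-labelled): a caller that wants CPU writes
 * would typically do
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 *	if (ret == 0)
 *		... write to the object's pages with the CPU ...
 *
 * after which both read_domains and write_domain are I915_GEM_DOMAIN_CPU, so
 * a later GPU user knows a clflush may be needed first. The caller shown is
 * an example, not code from this file.
 */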

/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct drm_i915_gem_request *request, *target = NULL;
	long ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		/*
		 * Note that the request might not have been submitted yet,
		 * in which case emitted_jiffies will be zero.
		 */
		if (!request->emitted_jiffies)
			continue;

		target = request;
	}
	if (target)
		i915_gem_request_get(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = i915_wait_request(target,
				I915_WAIT_INTERRUPTIBLE,
				MAX_SCHEDULE_TIMEOUT);
	i915_gem_request_put(target);

	return ret < 0 ? ret : 0;
}

struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
			return ERR_PTR(-ENOSPC);

		if (flags & PIN_MAPPABLE) {
			u32 fence_size;

			fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
							    i915_gem_object_get_tiling(obj));
			/* If the required space is larger than the available
			 * aperture, we will not able to find a slot for the
			 * object and unbinding the object now will be in
			 * vain. Worse, doing so may cause us to ping-pong
			 * the object in and out of the Global GTT and
			 * waste a lot of cycles under the mutex.
			 */
			if (fence_size > dev_priv->ggtt.mappable_end)
				return ERR_PTR(-E2BIG);

			/* If NONBLOCK is set the caller is optimistically
			 * trying to cache the full object within the mappable
			 * aperture, and *must* have a fallback in place for
			 * situations where we cannot bind the object. We
			 * can be a little more lax here and use the fallback
			 * more often to avoid costly migrations of ourselves
			 * and other objects within the aperture.
			 *
			 * Half-the-aperture is used as a simple heuristic.
			 * More interesting would be to do a search for a free
			 * block prior to making the commitment to unbind.
			 * That caters for the self-harm case, and with a
			 * little more heuristics (e.g. NOFAULT, NOEVICT)
			 * we could try to minimise harm to others.
			 */
			if (flags & PIN_NONBLOCK &&
			    fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}
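/* Added usage sketch (hypothetical caller, not from the original source):
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	... use i915_ggtt_offset(vma) for register or command setup ...
 *	i915_vma_unpin(vma);
 *
 * A NULL view selects the normal GGTT view; the flags chosen here are only
 * an example.
 */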

static __always_inline unsigned int __busy_read_flag(unsigned int id)
{
	/* Note that we could alias engines in the execbuf API, but
	 * that would be very unwise as it prevents userspace from
	 * fine control over engine selection. Ahem.
	 *
	 * This should be something like EXEC_MAX_ENGINE instead of
	 * I915_NUM_ENGINES.
	 */
	BUILD_BUG_ON(I915_NUM_ENGINES > 16);
	return 0x10000 << id;
}

static __always_inline unsigned int __busy_write_id(unsigned int id)
{
	/* The uABI guarantees an active writer is also amongst the read
	 * engines. This would be true if we accessed the activity tracking
	 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we can not guarantee that the last_write
	 * being active implies that we have set the same engine flag from
	 * last_read - hence we always set both read and write busy for
	 * last_write.
	 */
	return id | __busy_read_flag(id);
}
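/* Added worked example (not from the original comments): the busy-ioctl
 * result packs the last writer's engine exec_id into the low 16 bits and the
 * readers into the high 16 bits, one bit per engine. A buffer last written
 * by the engine with exec_id 1 therefore reports
 * __busy_write_id(1) == (0x10000 << 1) | 1, i.e. the read bit for that
 * engine plus the writer id, matching the BUILD_BUG_ON(I915_NUM_ENGINES > 16)
 * above.
 */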

static __always_inline unsigned int
__busy_set_if_active(const struct dma_fence *fence,
		     unsigned int (*flag)(unsigned int id))
{
	struct drm_i915_gem_request *rq;

	/* We have to check the current hw status of the fence as the uABI
	 * guarantees forward progress. We could rely on the idle worker
	 * to eventually flush us, but to minimise latency just ask the
	 * hardware.
	 *
	 * Note we only report on the status of native fences.
	 */
	if (!dma_fence_is_i915(fence))
		return 0;

	/* opencode to_request() in order to avoid const warnings */
	rq = container_of(fence, struct drm_i915_gem_request, fence);
	if (i915_gem_request_completed(rq))
		return 0;

	return flag(rq->engine->exec_id);
}

static __always_inline unsigned int
busy_check_reader(const struct dma_fence *fence)
{
	return __busy_set_if_active(fence, __busy_read_flag);
}

static __always_inline unsigned int
busy_check_writer(const struct dma_fence *fence)
{
	if (!fence)
		return 0;

	return __busy_set_if_active(fence, __busy_write_id);
}

int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file)
{
	struct drm_i915_gem_busy *args = data;
	struct drm_i915_gem_object *obj;
	struct reservation_object_list *list;
	unsigned int seq;
	int err;

	err = -ENOENT;
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj)
		goto out;

	/* A discrepancy here is that we do not report the status of
	 * non-i915 fences, i.e. even though we may report the object as idle,
	 * a call to set-domain may still stall waiting for foreign rendering.
	 * This also means that wait-ioctl may report an object as busy,
	 * where busy-ioctl considers it idle.
	 *
	 * We trade the ability to warn of foreign fences to report on which
	 * i915 engines are active for the object.
	 *
	 * Alternatively, we can trade that extra information on read/write
	 * activity with
	 *	args->busy =
	 *		!reservation_object_test_signaled_rcu(obj->resv, true);
	 * to report the overall busyness. This is what the wait-ioctl does.
	 *
	 */
retry:
	seq = raw_read_seqcount(&obj->resv->seq);

	/* Translate the exclusive fence to the READ *and* WRITE engine */
	args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));

	/* Translate shared fences to READ set of engines */
	list = rcu_dereference(obj->resv->fence);
	if (list) {
		unsigned int shared_count = list->shared_count, i;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence =
				rcu_dereference(list->shared[i]);

			args->busy |= busy_check_reader(fence);
		}
	}

	if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
		goto retry;

	err = 0;
out:
	rcu_read_unlock();
	return err;
}
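/* Added note on the lockless pattern above: the reservation object's fences
 * are sampled under rcu_read_lock() and the seqcount is re-checked with
 * read_seqcount_retry(); if any activity was seen and the fences changed
 * meanwhile, the ioctl loops back to "retry" instead of taking a lock.
 */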

int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
	return i915_gem_ring_throttle(dev, file_priv);
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;

	if (obj->mm.pages &&
	    i915_gem_object_is_tiled(obj) &&
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	mutex_unlock(&obj->mm.lock);

out:
	i915_gem_object_put(obj);
	return err;
}
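/* Added usage note (summary of the uAPI above, not original text): userspace
 * buffer caches mark idle buffers I915_MADV_DONTNEED so the shrinker may
 * purge them under memory pressure, mark them I915_MADV_WILLNEED again before
 * reuse, and check args->retained to learn whether the backing pages
 * survived in the meantime.
 */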

static void
frontbuffer_retire(struct i915_gem_active *active,
		   struct drm_i915_gem_request *request)
{
	struct drm_i915_gem_object *obj =
		container_of(active, typeof(*obj), frontbuffer_write);

	intel_fb_obj_flush(obj, true, ORIGIN_CS);
}

void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
{
	mutex_init(&obj->mm.lock);

	INIT_LIST_HEAD(&obj->global_link);
	INIT_LIST_HEAD(&obj->userfault_link);
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
	INIT_LIST_HEAD(&obj->batch_pool_link);

	obj->ops = ops;

	reservation_object_init(&obj->__builtin_resv);
	obj->resv = &obj->__builtin_resv;

	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
	init_request_active(&obj->frontbuffer_write, frontbuffer_retire);

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);

	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
}

static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
	(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))
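/* Added illustration (example values, not from the original comments): on a
 * 32bit kernel where obj->base.size is a 32bit size_t,
 * overflows_type(1ULL << 33, obj->base.size) is true, so an 8GiB request is
 * rejected with -E2BIG in i915_gem_object_create() below.
 */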

struct drm_i915_gem_object *
i915_gem_object_create(struct drm_device *dev, u64 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	struct address_space *mapping;
	gfp_t mask;
	int ret;

	/* There is a prevalence of the assumption that we fit the object's
	 * page count inside a 32bit _signed_ variable. Let's document this and
	 * catch if we ever need to fix it. In the meantime, if you do spot
	 * such a local variable, please consider fixing!
	 */
	if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return ERR_PTR(-ENOMEM);

	ret = drm_gem_object_init(dev, &obj->base, size);
	if (ret)
		goto fail;

	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
	if (IS_CRESTLINE(dev_priv) || IS_BROADWATER(dev_priv)) {
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

	mapping = obj->base.filp->f_mapping;
	mapping_set_gfp_mask(mapping, mask);

	i915_gem_object_init(obj, &i915_gem_object_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;

	if (HAS_LLC(dev_priv)) {
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

	trace_i915_gem_object_create(obj);

	return obj;

fail:
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
}

static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
{
	struct drm_i915_gem_object *obj, *on;

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);
	llist_for_each_entry(obj, freed, freed) {
		struct i915_vma *vma, *vn;

		trace_i915_gem_object_destroy(obj);

		GEM_BUG_ON(i915_gem_object_is_active(obj));
		list_for_each_entry_safe(vma, vn,
					 &obj->vma_list, obj_link) {
			GEM_BUG_ON(!i915_vma_is_ggtt(vma));
			GEM_BUG_ON(i915_vma_is_active(vma));
			vma->flags &= ~I915_VMA_PIN_MASK;
			i915_vma_close(vma);
		}
		GEM_BUG_ON(!list_empty(&obj->vma_list));
		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));

		list_del(&obj->global_link);
	}
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	llist_for_each_entry_safe(obj, on, freed, freed) {
		GEM_BUG_ON(obj->bind_count);
		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));

		if (obj->ops->release)
			obj->ops->release(obj);

		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
			atomic_set(&obj->mm.pages_pin_count, 0);
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
		GEM_BUG_ON(obj->mm.pages);

		if (obj->base.import_attach)
			drm_prime_gem_destroy(&obj->base, NULL);

		reservation_object_fini(&obj->__builtin_resv);
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(i915, obj->base.size);

		kfree(obj->bit_17);
		i915_gem_object_free(obj);
	}
}

static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed;

	freed = llist_del_all(&i915->mm.free_list);
	if (unlikely(freed))
		__i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);
	struct llist_node *freed;

	/* All file-owned VMA should have been released by this point through
	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * be unbound now.
	 */

	while ((freed = llist_del_all(&i915->mm.free_list)))
		__i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* We can't simply use call_rcu() from i915_gem_free_object()
	 * as we need to block whilst unbinding, and the call_rcu
	 * task may be called from softirq context. So we take a
	 * detour through a worker.
	 */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		schedule_work(&i915->mm.free_work);
}

void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

	if (obj->mm.quirked)
		__i915_gem_object_unpin_pages(obj);

	if (discard_backing_storage(obj))
		obj->mm.madv = I915_MADV_DONTNEED;

	/* Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
}
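/* Added summary (derived from the code above): freeing is a three stage
 * pipeline - i915_gem_free_object() queues an RCU callback,
 * __i915_gem_free_object_rcu() moves the object onto i915->mm.free_list once
 * lockless lookups have drained, and __i915_gem_free_work() (or
 * i915_gem_flush_free_objects()) finally unbinds and releases it under
 * struct_mutex.
 */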

void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
	if (i915_gem_object_is_active(obj))
		i915_gem_object_set_active_reference(obj);
	else
		i915_gem_object_put(obj);
}

static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
}

int i915_gem_suspend(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	intel_suspend_gt_powersave(dev_priv);

	mutex_lock(&dev->struct_mutex);

	/* We have to flush all the executing contexts to main memory so
	 * that they can be saved in the hibernation image. To ensure the last
	 * context image is coherent, we have to switch away from it. That
	 * leaves the dev_priv->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		goto err;

	ret = i915_gem_wait_for_idle(dev_priv,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
	if (ret)
		goto err;

	i915_gem_retire_requests(dev_priv);
	GEM_BUG_ON(dev_priv->gt.active_requests);

	assert_kernel_context_is_current(dev_priv);
	i915_gem_context_lost(dev_priv);
	mutex_unlock(&dev->struct_mutex);

	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
	flush_delayed_work(&dev_priv->gt.idle_work);
	flush_work(&dev_priv->mm.free_work);

	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
	WARN_ON(dev_priv->gt.awake);
	WARN_ON(!intel_execlists_idle(dev_priv));

	/*
	 * Neither the BIOS, ourselves or any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */
	if (HAS_HW_CONTEXTS(dev_priv)) {
		int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
		WARN_ON(reset && reset != -ENODEV);
	}

	return 0;

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

void i915_gem_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	WARN_ON(dev_priv->gt.awake);

	mutex_lock(&dev->struct_mutex);
	i915_gem_restore_gtt_mappings(dev_priv);

	/* As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
	dev_priv->gt.resume(dev_priv);

	mutex_unlock(&dev->struct_mutex);
}

void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN5(dev_priv))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN6(dev_priv))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN7(dev_priv))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN8(dev_priv))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}

static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
{
	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
		init_unused_ring(dev_priv, SRB2_BASE);
		init_unused_ring(dev_priv, SRB3_BASE);
	} else if (IS_GEN2(dev_priv)) {
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
	} else if (IS_GEN3(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, PRB2_BASE);
	}
}

int
i915_gem_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int ret;

	dev_priv->gt.last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev_priv))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	if (HAS_PCH_NOP(dev_priv)) {
		if (IS_IVYBRIDGE(dev_priv)) {
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
		} else if (INTEL_GEN(dev_priv) >= 7) {
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
	}

	i915_gem_init_swizzling(dev_priv);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev_priv);

	BUG_ON(!dev_priv->kernel_context);

	ret = i915_ppgtt_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* Need to do basic initialisation of all rings first: */
	for_each_engine(engine, dev_priv, id) {
		ret = engine->init_hw(engine);
		if (ret)
			goto out;
	}

	intel_mocs_init_l3cc_table(dev);

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_guc_setup(dev);
	if (ret)
		goto out;

out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	return ret;
}

bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
{
	if (INTEL_INFO(dev_priv)->gen < 6)
		return false;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	if (value >= 0)
		return value;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
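/* Added note (inferred from the helper above): i915.semaphores follows the
 * driver's usual tri-state module parameter convention - a negative value
 * means "auto", resolved here to a per-platform default, while 0 or 1 force
 * the feature off or on subject to the hard constraints checked first.
 */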

int i915_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	mutex_lock(&dev->struct_mutex);

	if (!i915.enable_execlists) {
		dev_priv->gt.resume = intel_legacy_submission_resume;
		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
	} else {
		dev_priv->gt.resume = intel_lr_context_resume;
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
	}

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	i915_gem_init_userptr(dev_priv);

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret)
		goto out_unlock;

	ret = i915_gem_context_init(dev);
	if (ret)
		goto out_unlock;

	ret = intel_engines_init(dev);
	if (ret)
		goto out_unlock;

	ret = i915_gem_init_hw(dev);
	if (ret == -EIO) {
		/* Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
		i915_gem_set_wedged(dev_priv);
		ret = 0;
	}

out_unlock:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

void
i915_gem_cleanup_engines(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		dev_priv->gt.cleanup_engine(engine);
}

void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	int i;

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
		 IS_I945GM(dev_priv) || IS_G33(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

		fence->i915 = dev_priv;
		fence->id = i;
		list_add_tail(&fence->link, &dev_priv->mm.fence_list);
	}
	i915_gem_restore_fences(dev_priv);

	i915_gem_detect_bit_6_swizzle(dev_priv);
}

int
i915_gem_load_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int err = -ENOMEM;

	dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->objects)
		goto err_out;

	dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->vmas)
		goto err_objects;

	dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
					SLAB_HWCACHE_ALIGN |
					SLAB_RECLAIM_ACCOUNT |
					SLAB_DESTROY_BY_RCU);
	if (!dev_priv->requests)
		goto err_vmas;

	dev_priv->dependencies = KMEM_CACHE(i915_dependency,
					    SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT);
	if (!dev_priv->dependencies)
		goto err_requests;

	mutex_lock(&dev_priv->drm.struct_mutex);
	INIT_LIST_HEAD(&dev_priv->gt.timelines);
	err = i915_gem_timeline_init__global(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);
	if (err)
		goto err_dependencies;

	INIT_LIST_HEAD(&dev_priv->context_list);
	INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
	init_llist_head(&dev_priv->mm.free_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
	INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
			  i915_gem_retire_work_handler);
	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
			  i915_gem_idle_work_handler);
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);

	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

	init_waitqueue_head(&dev_priv->pending_flip_queue);

	dev_priv->mm.interruptible = true;

	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

	spin_lock_init(&dev_priv->fb_tracking.lock);

	return 0;

err_dependencies:
	kmem_cache_destroy(dev_priv->dependencies);
err_requests:
	kmem_cache_destroy(dev_priv->requests);
err_vmas:
	kmem_cache_destroy(dev_priv->vmas);
err_objects:
	kmem_cache_destroy(dev_priv->objects);
err_out:
	return err;
}

void i915_gem_load_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	WARN_ON(!llist_empty(&dev_priv->mm.free_list));

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
	WARN_ON(!list_empty(&dev_priv->gt.timelines));
	mutex_unlock(&dev_priv->drm.struct_mutex);

	kmem_cache_destroy(dev_priv->dependencies);
	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();
}

int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_shrink_all(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&dev_priv->mm.unbound_list,
		&dev_priv->mm.bound_list,
		NULL
	}, **p;

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well.
	 */

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);

	for (p = phases; *p; p++) {
		list_for_each_entry(obj, *p, global_link) {
			obj->base.read_domains = I915_GEM_DOMAIN_CPU;
			obj->base.write_domain = I915_GEM_DOMAIN_CPU;
		}
	}
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;
}
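/* Added note (summary of the two functions above, not original text):
 * i915_gem_freeze() runs early in hibernation to shrink the object lists,
 * while i915_gem_freeze_late() runs just before the image is written and
 * forces every remaining object into the CPU domain so the saved pages match
 * what the CPU will restore.
 */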

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct drm_i915_gem_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);

	if (!list_empty(&file_priv->rps.link)) {
		spin_lock(&to_i915(dev)->rps.client_lock);
		list_del(&file_priv->rps.link);
		spin_unlock(&to_i915(dev)->rps.client_lock);
	}
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = to_i915(dev);
	file_priv->file = file;
	INIT_LIST_HEAD(&file_priv->rps.link);

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;

	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask are guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     sizeof(atomic_t) * BITS_PER_BYTE);

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}
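/* Added illustration (hypothetical call site, not quoted from this file):
 * during a plane update the display code calls
 *
 *	i915_gem_track_fb(old_fb_obj, new_fb_obj,
 *			  INTEL_FRONTBUFFER_PRIMARY(pipe));
 *
 * so each object's frontbuffer_bits always reflect the scanout slots it
 * currently backs.
 */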

/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
i915_gem_object_create_from_data(struct drm_device *dev,
			         const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

	obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
	if (IS_ERR(obj))
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto fail;

	sg = obj->mm.pages;
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->mm.dirty = true; /* Backing store is now out of date */
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
	i915_gem_object_put(obj);
	return ERR_PTR(ret);
}

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		unsigned long exception, i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		exception =
			RADIX_TREE_EXCEPTIONAL_ENTRY |
			idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i,
						(void *)exception);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radixtree will contain an exceptional entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(radix_tree_exception(sg))) {
		unsigned long base =
			(unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}
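/* Added usage sketch (hypothetical caller): with the pages pinned, a
 * sequential loop such as
 *
 *	for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
 *		struct page *page = i915_gem_object_get_page(obj, i);
 *		... kmap(page), copy, kunmap(page) ...
 *	}
 *
 * costs amortised O(1) per iteration thanks to the radixtree cache built by
 * i915_gem_object_get_sg() above.
 */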

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}