i915_gem.c 130.9 KB
Newer Older
1
/*
2
 * Copyright © 2008-2015 Intel Corporation
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

28
#include <drm/drmP.h>
29
#include <drm/drm_vma_manager.h>
30
#include <drm/i915_drm.h>
31
#include "i915_drv.h"
32
#include "i915_vgpu.h"
C
Chris Wilson 已提交
33
#include "i915_trace.h"
34
#include "intel_drv.h"
35
#include "intel_frontbuffer.h"
36
#include "intel_mocs.h"
37
#include <linux/dma-fence-array.h>
38
#include <linux/reservation.h>
39
#include <linux/shmem_fs.h>
40
#include <linux/slab.h>
41
#include <linux/stop_machine.h>
42
#include <linux/swap.h>
J
Jesse Barnes 已提交
43
#include <linux/pci.h>
44
#include <linux/dma-buf.h>
45

46
static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
47
static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
48
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
49

50 51 52
static bool cpu_cache_is_coherent(struct drm_device *dev,
				  enum i915_cache_level level)
{
53
	return HAS_LLC(to_i915(dev)) || level != I915_CACHE_NONE;
54 55
}

56 57
static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
{
58 59 60
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return false;

61 62 63 64 65 66
	if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		return true;

	return obj->pin_display;
}

67
static int
68
insert_mappable_node(struct i915_ggtt *ggtt,
69 70 71
                     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
72
	return drm_mm_insert_node_in_range_generic(&ggtt->base.mm, node,
73 74
						   size, 0,
						   I915_COLOR_UNEVICTABLE,
75
						   0, ggtt->mappable_end,
76 77 78 79 80 81 82 83 84 85
						   DRM_MM_SEARCH_DEFAULT,
						   DRM_MM_CREATE_DEFAULT);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

86 87
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
88
				  u64 size)
89
{
90
	spin_lock(&dev_priv->mm.object_stat_lock);
91 92
	dev_priv->mm.object_count++;
	dev_priv->mm.object_memory += size;
93
	spin_unlock(&dev_priv->mm.object_stat_lock);
94 95 96
}

static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
97
				     u64 size)
98
{
99
	spin_lock(&dev_priv->mm.object_stat_lock);
100 101
	dev_priv->mm.object_count--;
	dev_priv->mm.object_memory -= size;
102
	spin_unlock(&dev_priv->mm.object_stat_lock);
103 104
}

105
static int
106
i915_gem_wait_for_error(struct i915_gpu_error *error)
107 108 109
{
	int ret;

110 111
	might_sleep();

112
	if (!i915_reset_in_progress(error))
113 114
		return 0;

115 116 117 118 119
	/*
	 * Only wait 10 seconds for the gpu reset to complete to avoid hanging
	 * userspace. If it takes that long something really bad is going on and
	 * we should simply try to bail out and fail as gracefully as possible.
	 */
120
	ret = wait_event_interruptible_timeout(error->reset_queue,
121
					       !i915_reset_in_progress(error),
122
					       I915_RESET_TIMEOUT);
123 124 125 126
	if (ret == 0) {
		DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
		return -EIO;
	} else if (ret < 0) {
127
		return ret;
128 129
	} else {
		return 0;
130
	}
131 132
}

133
int i915_mutex_lock_interruptible(struct drm_device *dev)
134
{
135
	struct drm_i915_private *dev_priv = to_i915(dev);
136 137
	int ret;

138
	ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
139 140 141 142 143 144 145 146 147
	if (ret)
		return ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	return 0;
}
148

149 150
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
151
			    struct drm_file *file)
152
{
153
	struct drm_i915_private *dev_priv = to_i915(dev);
154
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
155
	struct drm_i915_gem_get_aperture *args = data;
156
	struct i915_vma *vma;
157
	size_t pinned;
158

159
	pinned = 0;
160
	mutex_lock(&dev->struct_mutex);
161
	list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
162
		if (i915_vma_is_pinned(vma))
163
			pinned += vma->node.size;
164
	list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
165
		if (i915_vma_is_pinned(vma))
166
			pinned += vma->node.size;
167
	mutex_unlock(&dev->struct_mutex);
168

169
	args->aper_size = ggtt->base.total;
170
	args->aper_available_size = args->aper_size - pinned;
171

172 173 174
	return 0;
}

175
static struct sg_table *
176
i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
177
{
178
	struct address_space *mapping = obj->base.filp->f_mapping;
179 180 181 182
	char *vaddr = obj->phys_handle->vaddr;
	struct sg_table *st;
	struct scatterlist *sg;
	int i;
183

184
	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
185
		return ERR_PTR(-EINVAL);
186 187 188 189 190 191 192

	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		char *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
193
			return ERR_CAST(page);
194 195 196 197 198 199

		src = kmap_atomic(page);
		memcpy(vaddr, src, PAGE_SIZE);
		drm_clflush_virt_range(vaddr, PAGE_SIZE);
		kunmap_atomic(src);

200
		put_page(page);
201 202 203
		vaddr += PAGE_SIZE;
	}

204
	i915_gem_chipset_flush(to_i915(obj->base.dev));
205 206 207

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
208
		return ERR_PTR(-ENOMEM);
209 210 211

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
212
		return ERR_PTR(-ENOMEM);
213 214 215 216 217
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;
218

219 220 221
	sg_dma_address(sg) = obj->phys_handle->busaddr;
	sg_dma_len(sg) = obj->base.size;

222
	return st;
223 224 225
}

static void
226 227
__i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
				struct sg_table *pages)
228
{
C
Chris Wilson 已提交
229
	GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
230

C
Chris Wilson 已提交
231 232
	if (obj->mm.madv == I915_MADV_DONTNEED)
		obj->mm.dirty = false;
233

234 235
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
	    !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
236
		drm_clflush_sg(pages);
237 238 239 240 241 242 243 244 245

	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}

static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
246
	__i915_gem_object_release_shmem(obj, pages);
247

C
Chris Wilson 已提交
248
	if (obj->mm.dirty) {
249
		struct address_space *mapping = obj->base.filp->f_mapping;
250
		char *vaddr = obj->phys_handle->vaddr;
251 252 253
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
254 255 256 257 258 259 260 261 262 263 264 265 266
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE);
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
C
Chris Wilson 已提交
267
			if (obj->mm.madv == I915_MADV_WILLNEED)
268
				mark_page_accessed(page);
269
			put_page(page);
270 271
			vaddr += PAGE_SIZE;
		}
C
Chris Wilson 已提交
272
		obj->mm.dirty = false;
273 274
	}

275 276
	sg_free_table(pages);
	kfree(pages);
277 278 279 280 281 282
}

static void
i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
{
	drm_pci_free(obj->base.dev, obj->phys_handle);
C
Chris Wilson 已提交
283
	i915_gem_object_unpin_pages(obj);
284 285 286 287 288 289 290 291
}

static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
	.get_pages = i915_gem_object_get_pages_phys,
	.put_pages = i915_gem_object_put_pages_phys,
	.release = i915_gem_object_release_phys,
};

292
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
293 294 295
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
296 297 298
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
299

300 301 302 303
	/* Closed vma are removed from the obj->vma_list - but they may
	 * still have an active binding on the object. To remove those we
	 * must wait for all rendering to complete to the object (as unbinding
	 * must anyway), and retire the requests.
304
	 */
305 306 307 308 309 310
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
311 312 313 314 315
	if (ret)
		return ret;

	i915_gem_retire_requests(to_i915(obj->base.dev));

316 317 318 319 320 321 322 323 324 325 326 327 328
	while ((vma = list_first_entry_or_null(&obj->vma_list,
					       struct i915_vma,
					       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		ret = i915_vma_unbind(vma);
		if (ret)
			break;
	}
	list_splice(&still_in_list, &obj->vma_list);

	return ret;
}

329 330 331 332 333
static long
i915_gem_object_wait_fence(struct dma_fence *fence,
			   unsigned int flags,
			   long timeout,
			   struct intel_rps_client *rps)
334
{
335
	struct drm_i915_gem_request *rq;
336

337
	BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
338

339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return timeout;

	if (!dma_fence_is_i915(fence))
		return dma_fence_wait_timeout(fence,
					      flags & I915_WAIT_INTERRUPTIBLE,
					      timeout);

	rq = to_request(fence);
	if (i915_gem_request_completed(rq))
		goto out;

	/* This client is about to stall waiting for the GPU. In many cases
	 * this is undesirable and limits the throughput of the system, as
	 * many clients cannot continue processing user input/output whilst
	 * blocked. RPS autotuning may take tens of milliseconds to respond
	 * to the GPU load and thus incurs additional latency for the client.
	 * We can circumvent that by promoting the GPU frequency to maximum
	 * before we wait. This makes the GPU throttle up much more quickly
	 * (good for benchmarks and user experience, e.g. window animations),
	 * but at a cost of spending more power processing the workload
	 * (bad for battery). Not all clients even want their results
	 * immediately and for them we should just let the GPU select its own
	 * frequency to maximise efficiency. To prevent a single client from
	 * forcing the clocks too high for the whole system, we only allow
	 * each client to waitboost once in a busy period.
	 */
	if (rps) {
		if (INTEL_GEN(rq->i915) >= 6)
			gen6_rps_boost(rq->i915, rps, rq->emitted_jiffies);
		else
			rps = NULL;
371 372
	}

373 374 375 376 377 378
	timeout = i915_wait_request(rq, flags, timeout);

out:
	if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
		i915_gem_request_retire_upto(rq);

379
	if (rps && rq->global_seqno == intel_engine_last_submit(rq->engine)) {
380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408
		/* The GPU is now idle and this client has stalled.
		 * Since no other client has submitted a request in the
		 * meantime, assume that this client is the only one
		 * supplying work to the GPU but is unable to keep that
		 * work supplied because it is waiting. Since the GPU is
		 * then never kept fully busy, RPS autoclocking will
		 * keep the clocks relatively low, causing further delays.
		 * Compensate by giving the synchronous client credit for
		 * a waitboost next time.
		 */
		spin_lock(&rq->i915->rps.client_lock);
		list_del_init(&rps->link);
		spin_unlock(&rq->i915->rps.client_lock);
	}

	return timeout;
}

static long
i915_gem_object_wait_reservation(struct reservation_object *resv,
				 unsigned int flags,
				 long timeout,
				 struct intel_rps_client *rps)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
409 410
		int ret;

411 412
		ret = reservation_object_get_fences_rcu(resv,
							&excl, &count, &shared);
413 414 415
		if (ret)
			return ret;

416 417 418 419 420 421
		for (i = 0; i < count; i++) {
			timeout = i915_gem_object_wait_fence(shared[i],
							     flags, timeout,
							     rps);
			if (timeout <= 0)
				break;
422

423 424 425 426 427 428 429 430
			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(resv);
431 432
	}

433 434 435 436 437 438
	if (excl && timeout > 0)
		timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);

	dma_fence_put(excl);

	return timeout;
439 440
}

441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504
static void __fence_set_priority(struct dma_fence *fence, int prio)
{
	struct drm_i915_gem_request *rq;
	struct intel_engine_cs *engine;

	if (!dma_fence_is_i915(fence))
		return;

	rq = to_request(fence);
	engine = rq->engine;
	if (!engine->schedule)
		return;

	engine->schedule(rq, prio);
}

static void fence_set_priority(struct dma_fence *fence, int prio)
{
	/* Recurse once into a fence-array */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);
		int i;

		for (i = 0; i < array->num_fences; i++)
			__fence_set_priority(array->fences[i], prio);
	} else {
		__fence_set_priority(fence, prio);
	}
}

int
i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
			      unsigned int flags,
			      int prio)
{
	struct dma_fence *excl;

	if (flags & I915_WAIT_ALL) {
		struct dma_fence **shared;
		unsigned int count, i;
		int ret;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			fence_set_priority(shared[i], prio);
			dma_fence_put(shared[i]);
		}

		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		fence_set_priority(excl, prio);
		dma_fence_put(excl);
	}
	return 0;
}

505 506 507 508 509 510
/**
 * Waits for rendering to the object to be completed
 * @obj: i915 gem object
 * @flags: how to wait (under a lock, for all rendering or just for writes etc)
 * @timeout: how long to wait
 * @rps: client (user process) to charge for any waitboosting
511
 */
512 513 514 515 516
int
i915_gem_object_wait(struct drm_i915_gem_object *obj,
		     unsigned int flags,
		     long timeout,
		     struct intel_rps_client *rps)
517
{
518 519 520 521 522 523 524
	might_sleep();
#if IS_ENABLED(CONFIG_LOCKDEP)
	GEM_BUG_ON(debug_locks &&
		   !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
		   !!(flags & I915_WAIT_LOCKED));
#endif
	GEM_BUG_ON(timeout < 0);
525

526 527 528
	timeout = i915_gem_object_wait_reservation(obj->resv,
						   flags, timeout,
						   rps);
529
	return timeout < 0 ? timeout : 0;
530 531 532 533 534 535 536 537 538
}

static struct intel_rps_client *to_rps_client(struct drm_file *file)
{
	struct drm_i915_file_private *fpriv = file->driver_priv;

	return &fpriv->rps;
}

539 540 541 542 543
int
i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
			    int align)
{
	drm_dma_handle_t *phys;
544
	int ret;
545 546 547 548 549 550 551 552

	if (obj->phys_handle) {
		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
			return -EBUSY;

		return 0;
	}

C
Chris Wilson 已提交
553
	if (obj->mm.madv != I915_MADV_WILLNEED)
554 555 556 557 558
		return -EFAULT;

	if (obj->base.filp == NULL)
		return -EINVAL;

C
Chris Wilson 已提交
559 560 561 562
	ret = i915_gem_object_unbind(obj);
	if (ret)
		return ret;

563
	__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
564 565
	if (obj->mm.pages)
		return -EBUSY;
566

567 568 569 570 571 572
	/* create a new object */
	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
	if (!phys)
		return -ENOMEM;

	obj->phys_handle = phys;
573 574
	obj->ops = &i915_gem_phys_ops;

C
Chris Wilson 已提交
575
	return i915_gem_object_pin_pages(obj);
576 577 578 579 580
}

static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
581
		     struct drm_file *file)
582 583 584
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
585
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
586
	int ret;
587 588 589 590

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
591 592 593 594 595 596
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
597
				   to_rps_client(file));
598 599
	if (ret)
		return ret;
600

601
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
602 603 604 605 606 607 608 609 610 611
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
612 613 614 615
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
616 617
	}

618
	drm_clflush_virt_range(vaddr, args->size);
619
	i915_gem_chipset_flush(to_i915(dev));
620 621

out:
622
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
623
	return ret;
624 625
}

626
void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
627
{
628
	return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
629 630 631 632
}

void i915_gem_object_free(struct drm_i915_gem_object *obj)
{
633
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
634
	kmem_cache_free(dev_priv->objects, obj);
635 636
}

637 638
static int
i915_gem_create(struct drm_file *file,
639
		struct drm_i915_private *dev_priv,
640 641
		uint64_t size,
		uint32_t *handle_p)
642
{
643
	struct drm_i915_gem_object *obj;
644 645
	int ret;
	u32 handle;
646

647
	size = roundup(size, PAGE_SIZE);
648 649
	if (size == 0)
		return -EINVAL;
650 651

	/* Allocate the new object */
652
	obj = i915_gem_object_create(dev_priv, size);
653 654
	if (IS_ERR(obj))
		return PTR_ERR(obj);
655

656
	ret = drm_gem_handle_create(file, &obj->base, &handle);
657
	/* drop reference from allocate - handle holds it now */
C
Chris Wilson 已提交
658
	i915_gem_object_put(obj);
659 660
	if (ret)
		return ret;
661

662
	*handle_p = handle;
663 664 665
	return 0;
}

666 667 668 669 670 671
int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	/* have to work out size/pitch and return them */
672
	args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
673
	args->size = args->pitch * args->height;
674
	return i915_gem_create(file, to_i915(dev),
675
			       args->size, &args->handle);
676 677 678 679
}

/**
 * Creates a new mm object and returns a handle to it.
680 681 682
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
683 684 685 686 687
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
688
	struct drm_i915_private *dev_priv = to_i915(dev);
689
	struct drm_i915_gem_create *args = data;
690

691
	i915_gem_flush_free_objects(dev_priv);
692

693
	return i915_gem_create(file, dev_priv,
694
			       args->size, &args->handle);
695 696
}

697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722
static inline int
__copy_to_user_swizzled(char __user *cpu_vaddr,
			const char *gpu_vaddr, int gpu_offset,
			int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_to_user(cpu_vaddr + cpu_offset,
				     gpu_vaddr + swizzled_gpu_offset,
				     this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

723
static inline int
724 725
__copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
			  const char __user *cpu_vaddr,
726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748
			  int length)
{
	int ret, cpu_offset = 0;

	while (length > 0) {
		int cacheline_end = ALIGN(gpu_offset + 1, 64);
		int this_length = min(cacheline_end - gpu_offset, length);
		int swizzled_gpu_offset = gpu_offset ^ 64;

		ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
				       cpu_vaddr + cpu_offset,
				       this_length);
		if (ret)
			return ret + length;

		cpu_offset += this_length;
		gpu_offset += this_length;
		length -= this_length;
	}

	return 0;
}

749 750 751 752 753 754
/*
 * Pins the specified object's pages and synchronizes the object with
 * GPU accesses. Sets needs_clflush to non-zero if the caller should
 * flush the object from the CPU cache.
 */
int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
755
				    unsigned int *needs_clflush)
756 757 758
{
	int ret;

759
	lockdep_assert_held(&obj->base.dev->struct_mutex);
760

761
	*needs_clflush = 0;
762 763
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;
764

765 766 767 768 769
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
770 771 772
	if (ret)
		return ret;

C
Chris Wilson 已提交
773
	ret = i915_gem_object_pin_pages(obj);
774 775 776
	if (ret)
		return ret;

777 778
	i915_gem_object_flush_gtt_write_domain(obj);

779 780 781 782 783 784
	/* If we're not in the cpu read domain, set ourself into the gtt
	 * read domain and manually flush cachelines (if required). This
	 * optimizes for the case when the gpu will dirty the data
	 * anyway again before the next pread happens.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
785 786
		*needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
							obj->cache_level);
787 788 789

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, false);
790 791 792
		if (ret)
			goto err_unpin;

793
		*needs_clflush = 0;
794 795
	}

796
	/* return with the pages pinned */
797
	return 0;
798 799 800 801

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
802 803 804 805 806 807 808
}

int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
				     unsigned int *needs_clflush)
{
	int ret;

809 810
	lockdep_assert_held(&obj->base.dev->struct_mutex);

811 812 813 814
	*needs_clflush = 0;
	if (!i915_gem_object_has_struct_page(obj))
		return -ENODEV;

815 816 817 818 819 820
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
821 822 823
	if (ret)
		return ret;

C
Chris Wilson 已提交
824
	ret = i915_gem_object_pin_pages(obj);
825 826 827
	if (ret)
		return ret;

828 829
	i915_gem_object_flush_gtt_write_domain(obj);

830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846
	/* If we're not in the cpu write domain, set ourself into the
	 * gtt write domain and manually flush cachelines (as required).
	 * This optimizes for the case when the gpu will use the data
	 * right away and we therefore have to clflush anyway.
	 */
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
		*needs_clflush |= cpu_write_needs_clflush(obj) << 1;

	/* Same trick applies to invalidate partially written cachelines read
	 * before writing.
	 */
	if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
		*needs_clflush |= !cpu_cache_is_coherent(obj->base.dev,
							 obj->cache_level);

	if (*needs_clflush && !static_cpu_has(X86_FEATURE_CLFLUSH)) {
		ret = i915_gem_object_set_to_cpu_domain(obj, true);
847 848 849
		if (ret)
			goto err_unpin;

850 851 852 853 854 855 856
		*needs_clflush = 0;
	}

	if ((*needs_clflush & CLFLUSH_AFTER) == 0)
		obj->cache_dirty = true;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
C
Chris Wilson 已提交
857
	obj->mm.dirty = true;
858
	/* return with the pages pinned */
859
	return 0;
860 861 862 863

err_unpin:
	i915_gem_object_unpin_pages(obj);
	return ret;
864 865
}

866 867 868 869
static void
shmem_clflush_swizzled_range(char *addr, unsigned long length,
			     bool swizzled)
{
870
	if (unlikely(swizzled)) {
871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887
		unsigned long start = (unsigned long) addr;
		unsigned long end = (unsigned long) addr + length;

		/* For swizzling simply ensure that we always flush both
		 * channels. Lame, but simple and it works. Swizzled
		 * pwrite/pread is far from a hotpath - current userspace
		 * doesn't use it at all. */
		start = round_down(start, 128);
		end = round_up(end, 128);

		drm_clflush_virt_range((void *)start, end - start);
	} else {
		drm_clflush_virt_range(addr, length);
	}

}

888 889 890
/* Only difference to the fast-path function is that this can handle bit17
 * and uses non-atomic copy and kmap functions. */
static int
891
shmem_pread_slow(struct page *page, int offset, int length,
892 893 894 895 896 897 898 899
		 char __user *user_data,
		 bool page_do_bit17_swizzling, bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);
	if (needs_clflush)
900
		shmem_clflush_swizzled_range(vaddr + offset, length,
901
					     page_do_bit17_swizzling);
902 903

	if (page_do_bit17_swizzling)
904
		ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
905
	else
906
		ret = __copy_to_user(user_data, vaddr + offset, length);
907 908
	kunmap(page);

909
	return ret ? - EFAULT : 0;
910 911
}

912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987
static int
shmem_pread(struct page *page, int offset, int length, char __user *user_data,
	    bool page_do_bit17_swizzling, bool needs_clflush)
{
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush)
			drm_clflush_virt_range(vaddr + offset, length);
		ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return 0;

	return shmem_pread_slow(page, offset, length, user_data,
				page_do_bit17_swizzling, needs_clflush);
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	char __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int needs_clflush;
	unsigned int idx, offset;
	int ret;

	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);

	ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
	mutex_unlock(&obj->base.dev->struct_mutex);
	if (ret)
		return ret;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;

		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;

		ret = shmem_pread(page, offset, length, user_data,
				  page_to_phys(page) & obj_do_bit17_swizzling,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_obj_finish_shmem_access(obj);
	return ret;
}

static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
988 989
{
	void *vaddr;
990
	unsigned long unwritten;
991 992

	/* We can use the cpu mem copy function because this is X86. */
993 994 995 996 997 998 999 1000 1001
	vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = (void __force *)
			io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data, vaddr + offset, length);
		io_mapping_unmap(vaddr);
	}
1002 1003 1004 1005
	return unwritten;
}

static int
1006 1007
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
1008
{
1009 1010
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
1011
	struct drm_mm_node node;
1012 1013 1014
	struct i915_vma *vma;
	void __user *user_data;
	u64 remain, offset;
1015 1016
	int ret;

1017 1018 1019 1020 1021 1022 1023
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE | PIN_NONBLOCK);
1024 1025 1026
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
1027
		ret = i915_vma_put_fence(vma);
1028 1029 1030 1031 1032
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
C
Chris Wilson 已提交
1033
	if (IS_ERR(vma)) {
1034
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1035
		if (ret)
1036 1037
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
1038 1039 1040 1041 1042 1043
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret)
		goto out_unpin;

1044
	mutex_unlock(&i915->drm.struct_mutex);
1045

1046 1047 1048
	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;
1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1065
					       node.start, I915_CACHE_NONE, 0);
1066 1067 1068 1069
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}
1070 1071 1072

		if (gtt_user_read(&ggtt->mappable, page_base, page_offset,
				  user_data, page_length)) {
1073 1074 1075 1076 1077 1078 1079 1080 1081
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

1082
	mutex_lock(&i915->drm.struct_mutex);
1083 1084 1085 1086
out_unpin:
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
1087
				       node.start, node.size);
1088 1089
		remove_mappable_node(&node);
	} else {
C
Chris Wilson 已提交
1090
		i915_vma_unpin(vma);
1091
	}
1092 1093 1094
out_unlock:
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);
1095

1096 1097 1098
	return ret;
}

1099 1100
/**
 * Reads data from the object referenced by handle.
1101 1102 1103
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
1104 1105 1106 1107 1108
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1109
		     struct drm_file *file)
1110 1111
{
	struct drm_i915_gem_pread *args = data;
1112
	struct drm_i915_gem_object *obj;
1113
	int ret;
1114

1115 1116 1117 1118
	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_WRITE,
1119
		       u64_to_user_ptr(args->data_ptr),
1120 1121 1122
		       args->size))
		return -EFAULT;

1123
	obj = i915_gem_object_lookup(file, args->handle);
1124 1125
	if (!obj)
		return -ENOENT;
1126

1127
	/* Bounds check source.  */
1128 1129
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
C
Chris Wilson 已提交
1130
		ret = -EINVAL;
1131
		goto out;
C
Chris Wilson 已提交
1132 1133
	}

C
Chris Wilson 已提交
1134 1135
	trace_i915_gem_object_pread(obj, args->offset, args->size);

1136 1137 1138 1139
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1140
	if (ret)
1141
		goto out;
1142

1143
	ret = i915_gem_object_pin_pages(obj);
1144
	if (ret)
1145
		goto out;
1146

1147
	ret = i915_gem_shmem_pread(obj, args);
1148
	if (ret == -EFAULT || ret == -ENODEV)
1149
		ret = i915_gem_gtt_pread(obj, args);
1150

1151 1152
	i915_gem_object_unpin_pages(obj);
out:
C
Chris Wilson 已提交
1153
	i915_gem_object_put(obj);
1154
	return ret;
1155 1156
}

1157 1158
/* This is the fast write path which cannot handle
 * page faults in the source data
1159
 */
1160

1161 1162 1163 1164
static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
1165
{
1166
	void *vaddr;
1167
	unsigned long unwritten;
1168

1169
	/* We can use the cpu mem copy function because this is X86. */
1170 1171
	vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
1172
						      user_data, length);
1173 1174 1175 1176 1177 1178 1179
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = (void __force *)
			io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user(vaddr + offset, user_data, length);
		io_mapping_unmap(vaddr);
	}
1180 1181 1182 1183

	return unwritten;
}

1184 1185 1186
/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
1187
 * @obj: i915 GEM object
1188
 * @args: pwrite arguments structure
1189
 */
1190
static int
1191 1192
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
1193
{
1194
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
1195 1196
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct drm_mm_node node;
1197 1198 1199
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
1200
	int ret;
1201

1202 1203 1204
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;
D
Daniel Vetter 已提交
1205

1206
	intel_runtime_pm_get(i915);
C
Chris Wilson 已提交
1207
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1208
				       PIN_MAPPABLE | PIN_NONBLOCK);
1209 1210 1211
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
1212
		ret = i915_vma_put_fence(vma);
1213 1214 1215 1216 1217
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
C
Chris Wilson 已提交
1218
	if (IS_ERR(vma)) {
1219
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1220
		if (ret)
1221 1222
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
1223
	}
D
Daniel Vetter 已提交
1224 1225 1226 1227 1228

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto out_unpin;

1229 1230
	mutex_unlock(&i915->drm.struct_mutex);

1231
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1232

1233 1234 1235 1236
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
1237 1238
		/* Operation in this page
		 *
1239 1240 1241
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
1242
		 */
1243
		u32 page_base = node.start;
1244 1245
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
1246 1247 1248 1249 1250 1251 1252 1253 1254 1255
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->base.insert_page(&ggtt->base,
					       i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					       node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
1256
		/* If we get a fault while copying data, then (presumably) our
1257 1258
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
1259 1260
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
1261
		 */
1262 1263 1264 1265
		if (ggtt_write(&ggtt->mappable, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
D
Daniel Vetter 已提交
1266
		}
1267

1268 1269 1270
		remain -= page_length;
		user_data += page_length;
		offset += page_length;
1271
	}
1272
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1273 1274

	mutex_lock(&i915->drm.struct_mutex);
D
Daniel Vetter 已提交
1275
out_unpin:
1276 1277 1278
	if (node.allocated) {
		wmb();
		ggtt->base.clear_range(&ggtt->base,
1279
				       node.start, node.size);
1280 1281
		remove_mappable_node(&node);
	} else {
C
Chris Wilson 已提交
1282
		i915_vma_unpin(vma);
1283
	}
1284
out_unlock:
1285
	intel_runtime_pm_put(i915);
1286
	mutex_unlock(&i915->drm.struct_mutex);
1287
	return ret;
1288 1289
}

1290
static int
1291
shmem_pwrite_slow(struct page *page, int offset, int length,
1292 1293 1294 1295
		  char __user *user_data,
		  bool page_do_bit17_swizzling,
		  bool needs_clflush_before,
		  bool needs_clflush_after)
1296
{
1297 1298
	char *vaddr;
	int ret;
1299

1300
	vaddr = kmap(page);
1301
	if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1302
		shmem_clflush_swizzled_range(vaddr + offset, length,
1303
					     page_do_bit17_swizzling);
1304
	if (page_do_bit17_swizzling)
1305 1306
		ret = __copy_from_user_swizzled(vaddr, offset, user_data,
						length);
1307
	else
1308
		ret = __copy_from_user(vaddr + offset, user_data, length);
1309
	if (needs_clflush_after)
1310
		shmem_clflush_swizzled_range(vaddr + offset, length,
1311
					     page_do_bit17_swizzling);
1312
	kunmap(page);
1313

1314
	return ret ? -EFAULT : 0;
1315 1316
}

1317 1318 1319 1320 1321
/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
1322
static int
1323 1324 1325 1326
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool page_do_bit17_swizzling,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
1327
{
1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359
	int ret;

	ret = -ENODEV;
	if (!page_do_bit17_swizzling) {
		char *vaddr = kmap_atomic(page);

		if (needs_clflush_before)
			drm_clflush_virt_range(vaddr + offset, len);
		ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
		if (needs_clflush_after)
			drm_clflush_virt_range(vaddr + offset, len);

		kunmap_atomic(vaddr);
	}
	if (ret == 0)
		return ret;

	return shmem_pwrite_slow(page, offset, len, user_data,
				 page_do_bit17_swizzling,
				 needs_clflush_before,
				 needs_clflush_after);
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	void __user *user_data;
	u64 remain;
	unsigned int obj_do_bit17_swizzling;
	unsigned int partial_cacheline_write;
1360
	unsigned int needs_clflush;
1361 1362
	unsigned int offset, idx;
	int ret;
1363

1364
	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1365 1366 1367
	if (ret)
		return ret;

1368 1369 1370 1371
	ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
	mutex_unlock(&i915->drm.struct_mutex);
	if (ret)
		return ret;
1372

1373 1374 1375
	obj_do_bit17_swizzling = 0;
	if (i915_gem_object_needs_bit17_swizzle(obj))
		obj_do_bit17_swizzling = BIT(17);
1376

1377 1378 1379 1380 1381 1382 1383
	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1384

1385 1386 1387 1388 1389 1390
	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		int length;
1391

1392 1393 1394
		length = remain;
		if (offset + length > PAGE_SIZE)
			length = PAGE_SIZE - offset;
1395

1396 1397 1398 1399
		ret = shmem_pwrite(page, offset, length, user_data,
				   page_to_phys(page) & obj_do_bit17_swizzling,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
1400
		if (ret)
1401
			break;
1402

1403 1404 1405
		remain -= length;
		user_data += length;
		offset = 0;
1406
	}
1407

1408
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
1409
	i915_gem_obj_finish_shmem_access(obj);
1410
	return ret;
1411 1412 1413 1414
}

/**
 * Writes data to the object referenced by handle.
1415 1416 1417
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1418 1419 1420 1421 1422
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1423
		      struct drm_file *file)
1424 1425
{
	struct drm_i915_gem_pwrite *args = data;
1426
	struct drm_i915_gem_object *obj;
1427 1428 1429 1430 1431 1432
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(VERIFY_READ,
1433
		       u64_to_user_ptr(args->data_ptr),
1434 1435 1436
		       args->size))
		return -EFAULT;

1437
	obj = i915_gem_object_lookup(file, args->handle);
1438 1439
	if (!obj)
		return -ENOENT;
1440

1441
	/* Bounds check destination. */
1442 1443
	if (args->offset > obj->base.size ||
	    args->size > obj->base.size - args->offset) {
C
Chris Wilson 已提交
1444
		ret = -EINVAL;
1445
		goto err;
C
Chris Wilson 已提交
1446 1447
	}

C
Chris Wilson 已提交
1448 1449
	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

1450 1451 1452 1453 1454
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1455 1456 1457
	if (ret)
		goto err;

1458
	ret = i915_gem_object_pin_pages(obj);
1459
	if (ret)
1460
		goto err;
1461

D
Daniel Vetter 已提交
1462
	ret = -EFAULT;
1463 1464 1465 1466 1467 1468
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
1469
	if (!i915_gem_object_has_struct_page(obj) ||
1470
	    cpu_write_needs_clflush(obj))
D
Daniel Vetter 已提交
1471 1472
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
1473 1474
		 * textures). Fallback to the shmem path in that case.
		 */
1475
		ret = i915_gem_gtt_pwrite_fast(obj, args);
1476

1477
	if (ret == -EFAULT || ret == -ENOSPC) {
1478 1479
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
1480
		else
1481
			ret = i915_gem_shmem_pwrite(obj, args);
1482
	}
1483

1484
	i915_gem_object_unpin_pages(obj);
1485
err:
C
Chris Wilson 已提交
1486
	i915_gem_object_put(obj);
1487
	return ret;
1488 1489
}

1490
static inline enum fb_op_origin
1491 1492
write_origin(struct drm_i915_gem_object *obj, unsigned domain)
{
1493 1494
	return (domain == I915_GEM_DOMAIN_GTT ?
		obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
1495 1496
}

1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517
static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915;
	struct list_head *list;
	struct i915_vma *vma;

	list_for_each_entry(vma, &obj->vma_list, obj_link) {
		if (!i915_vma_is_ggtt(vma))
			continue;

		if (i915_vma_is_active(vma))
			continue;

		if (!drm_mm_node_allocated(&vma->node))
			continue;

		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
	}

	i915 = to_i915(obj->base.dev);
	list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1518
	list_move_tail(&obj->global_link, list);
1519 1520
}

1521
/**
1522 1523
 * Called when user space prepares to use an object with the CPU, either
 * through the mmap ioctl's mapping or a GTT mapping.
1524 1525 1526
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1527 1528 1529
 */
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1530
			  struct drm_file *file)
1531 1532
{
	struct drm_i915_gem_set_domain *args = data;
1533
	struct drm_i915_gem_object *obj;
1534 1535
	uint32_t read_domains = args->read_domains;
	uint32_t write_domain = args->write_domain;
1536
	int err;
1537

1538
	/* Only handle setting domains to types used by the CPU. */
1539
	if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1540 1541 1542 1543 1544 1545 1546 1547
		return -EINVAL;

	/* Having something in the write domain implies it's in the read
	 * domain, and only that read domain.  Enforce that in the request.
	 */
	if (write_domain != 0 && read_domains != write_domain)
		return -EINVAL;

1548
	obj = i915_gem_object_lookup(file, args->handle);
1549 1550
	if (!obj)
		return -ENOENT;
1551

1552 1553 1554 1555
	/* Try to flush the object off the GPU without holding the lock.
	 * We will repeat the flush holding the lock in the normal manner
	 * to catch cases where we are gazumped.
	 */
1556
	err = i915_gem_object_wait(obj,
1557 1558 1559 1560
				   I915_WAIT_INTERRUPTIBLE |
				   (write_domain ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
1561
	if (err)
C
Chris Wilson 已提交
1562
		goto out;
1563

1564 1565 1566 1567 1568 1569 1570 1571 1572 1573
	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
	err = i915_gem_object_pin_pages(obj);
	if (err)
C
Chris Wilson 已提交
1574
		goto out;
1575 1576 1577

	err = i915_mutex_lock_interruptible(dev);
	if (err)
C
Chris Wilson 已提交
1578
		goto out_unpin;
1579

1580
	if (read_domains & I915_GEM_DOMAIN_GTT)
1581
		err = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
1582
	else
1583
		err = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
1584

1585 1586
	/* And bump the LRU for this access */
	i915_gem_object_bump_inactive_ggtt(obj);
1587

1588
	mutex_unlock(&dev->struct_mutex);
1589

1590 1591 1592
	if (write_domain != 0)
		intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));

C
Chris Wilson 已提交
1593
out_unpin:
1594
	i915_gem_object_unpin_pages(obj);
C
Chris Wilson 已提交
1595 1596
out:
	i915_gem_object_put(obj);
1597
	return err;
1598 1599 1600 1601
}

/**
 * Called when user space has done writes to this buffer
1602 1603 1604
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1605 1606 1607
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1608
			 struct drm_file *file)
1609 1610
{
	struct drm_i915_gem_sw_finish *args = data;
1611
	struct drm_i915_gem_object *obj;
1612
	int err = 0;
1613

1614
	obj = i915_gem_object_lookup(file, args->handle);
1615 1616
	if (!obj)
		return -ENOENT;
1617 1618

	/* Pinned buffers may be scanout, so flush the cache */
1619 1620 1621 1622 1623 1624 1625
	if (READ_ONCE(obj->pin_display)) {
		err = i915_mutex_lock_interruptible(dev);
		if (!err) {
			i915_gem_object_flush_cpu_write_domain(obj);
			mutex_unlock(&dev->struct_mutex);
		}
	}
1626

C
Chris Wilson 已提交
1627
	i915_gem_object_put(obj);
1628
	return err;
1629 1630 1631
}

/**
1632 1633 1634 1635 1636
 * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
 *			 it is mapped to.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
1637 1638 1639
 *
 * While the mapping holds a reference on the contents of the object, it doesn't
 * imply a ref on the object itself.
1640 1641 1642 1643 1644 1645 1646 1647 1648 1649
 *
 * IMPORTANT:
 *
 * DRM driver writers who look a this function as an example for how to do GEM
 * mmap support, please don't implement mmap support like here. The modern way
 * to implement DRM mmap support is with an mmap offset ioctl (like
 * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
 * That way debug tooling like valgrind will understand what's going on, hiding
 * the mmap call in a driver private ioctl will break that. The i915 driver only
 * does cpu mmaps this way because we didn't know better.
1650 1651 1652
 */
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1653
		    struct drm_file *file)
1654 1655
{
	struct drm_i915_gem_mmap *args = data;
1656
	struct drm_i915_gem_object *obj;
1657 1658
	unsigned long addr;

1659 1660 1661
	if (args->flags & ~(I915_MMAP_WC))
		return -EINVAL;

1662
	if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1663 1664
		return -ENODEV;

1665 1666
	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
1667
		return -ENOENT;
1668

1669 1670 1671
	/* prime objects have no backing filp to GEM mmap
	 * pages from.
	 */
1672
	if (!obj->base.filp) {
C
Chris Wilson 已提交
1673
		i915_gem_object_put(obj);
1674 1675 1676
		return -EINVAL;
	}

1677
	addr = vm_mmap(obj->base.filp, 0, args->size,
1678 1679
		       PROT_READ | PROT_WRITE, MAP_SHARED,
		       args->offset);
1680 1681 1682 1683
	if (args->flags & I915_MMAP_WC) {
		struct mm_struct *mm = current->mm;
		struct vm_area_struct *vma;

1684
		if (down_write_killable(&mm->mmap_sem)) {
C
Chris Wilson 已提交
1685
			i915_gem_object_put(obj);
1686 1687
			return -EINTR;
		}
1688 1689 1690 1691 1692 1693 1694
		vma = find_vma(mm, addr);
		if (vma)
			vma->vm_page_prot =
				pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		else
			addr = -ENOMEM;
		up_write(&mm->mmap_sem);
1695 1696

		/* This may race, but that's ok, it only gets set */
1697
		WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1698
	}
C
Chris Wilson 已提交
1699
	i915_gem_object_put(obj);
1700 1701 1702 1703 1704 1705 1706 1707
	if (IS_ERR((void *)addr))
		return addr;

	args->addr_ptr = (uint64_t) addr;

	return 0;
}

1708 1709 1710 1711 1712 1713 1714 1715 1716 1717
static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
{
	u64 size;

	size = i915_gem_object_get_stride(obj);
	size *= i915_gem_object_get_tiling(obj) == I915_TILING_Y ? 32 : 8;

	return size >> PAGE_SHIFT;
}

1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 1755 1756 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767
/**
 * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
 *
 * A history of the GTT mmap interface:
 *
 * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
 *     aligned and suitable for fencing, and still fit into the available
 *     mappable space left by the pinned display objects. A classic problem
 *     we called the page-fault-of-doom where we would ping-pong between
 *     two objects that could not fit inside the GTT and so the memcpy
 *     would page one object in at the expense of the other between every
 *     single byte.
 *
 * 1 - Objects can be any size, and have any compatible fencing (X Y, or none
 *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
 *     object is too large for the available space (or simply too large
 *     for the mappable aperture!), a view is created instead and faulted
 *     into userspace. (This view is aligned and sized appropriately for
 *     fenced access.)
 *
 * Restrictions:
 *
 *  * snoopable objects cannot be accessed via the GTT. It can cause machine
 *    hangs on some architectures, corruption on others. An attempt to service
 *    a GTT page fault from a snoopable object will generate a SIGBUS.
 *
 *  * the object must be able to fit into RAM (physical memory, though no
 *    limited to the mappable aperture).
 *
 *
 * Caveats:
 *
 *  * a new GTT page fault will synchronize rendering from the GPU and flush
 *    all data to system memory. Subsequent access will not be synchronized.
 *
 *  * all mappings are revoked on runtime device suspend.
 *
 *  * there are only 8, 16 or 32 fence registers to share between all users
 *    (older machines require fence register for display and blitter access
 *    as well). Contention of the fence registers will cause the previous users
 *    to be unmapped and any new access will generate new page faults.
 *
 *  * running out of memory while servicing a fault may generate a SIGBUS,
 *    rather than the expected SIGSEGV.
 */
int i915_gem_mmap_gtt_version(void)
{
	return 1;
}

1768 1769
/**
 * i915_gem_fault - fault a page into the GTT
C
Chris Wilson 已提交
1770
 * @area: CPU VMA in question
1771
 * @vmf: fault info
1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782
 *
 * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
 * from userspace.  The fault handler takes care of binding the object to
 * the GTT (if needed), allocating and programming a fence register (again,
 * only if needed based on whether the old reg is still valid or the object
 * is tiled) and inserting a new PTE into the faulting process.
 *
 * Note that the faulting process may involve evicting existing objects
 * from the GTT and/or fence registers to make room.  So performance may
 * suffer if the GTT working set is large or there are few fence registers
 * left.
1783 1784 1785
 *
 * The current feature set supported by i915_gem_fault() and thus GTT mmaps
 * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1786
 */
C
Chris Wilson 已提交
1787
int i915_gem_fault(struct vm_area_struct *area, struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
	struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
	struct i915_vma *vma;
	pgoff_t page_offset;
	unsigned int flags;
	int ret;

	/* We don't use vmf->pgoff since that has the fake offset */
	page_offset = ((unsigned long)vmf->virtual_address - area->vm_start) >>
		PAGE_SHIFT;

	trace_i915_gem_object_fault(obj, page_offset, true, write);

	/* Try to flush the object off the GPU first without holding the lock.
	 * Upon acquiring the lock, we will perform our sanity checks and then
	 * repeat the flush holding the lock in the normal manner to catch cases
	 * where we are gazumped.
	 */
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	intel_runtime_pm_get(dev_priv);

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto err_rpm;

	/* Access to snoopable pages through the GTT is incoherent. */
	if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
		ret = -EFAULT;
		goto err_unlock;
	}

	/* If the object is smaller than a couple of partial vma, it is
	 * not worth only creating a single partial vma - we may as well
	 * clear enough space for the full object.
	 */
	flags = PIN_MAPPABLE;
	if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
		flags |= PIN_NONBLOCK | PIN_NONFAULT;

	/* Now pin it into the GTT as needed */
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
	if (IS_ERR(vma)) {
		struct i915_ggtt_view view;
		unsigned int chunk_size;

		/* Use a partial view if it is bigger than available space */
		chunk_size = MIN_CHUNK_PAGES;
		if (i915_gem_object_is_tiled(obj))
			chunk_size = roundup(chunk_size, tile_row_pages(obj));

		memset(&view, 0, sizeof(view));
		view.type = I915_GGTT_VIEW_PARTIAL;
		view.params.partial.offset = rounddown(page_offset, chunk_size);
		view.params.partial.size =
			min_t(unsigned int, chunk_size,
			      vma_pages(area) - view.params.partial.offset);

		/* If the partial covers the entire object, just create a
		 * normal VMA.
		 */
		if (chunk_size >= obj->base.size >> PAGE_SHIFT)
			view.type = I915_GGTT_VIEW_NORMAL;

		/* Userspace is now writing through an untracked VMA, abandon
		 * all hope that the hardware is able to track future writes.
		 */
		obj->frontbuffer_ggtt_origin = ORIGIN_CPU;

		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
	}
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unlock;
	}

	ret = i915_gem_object_set_to_gtt_domain(obj, write);
	if (ret)
		goto err_unpin;

	ret = i915_vma_get_fence(vma);
	if (ret)
		goto err_unpin;

	/* Mark as being mmapped into userspace for later revocation */
	assert_rpm_wakelock_held(dev_priv);
	if (list_empty(&obj->userfault_link))
		list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);

	/* Finally, remap it using the new GTT offset */
	ret = remap_io_mapping(area,
			       area->vm_start + (vma->ggtt_view.params.partial.offset << PAGE_SHIFT),
			       (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
			       min_t(u64, vma->size, area->vm_end - area->vm_start),
			       &ggtt->mappable);

err_unpin:
	__i915_vma_unpin(vma);
err_unlock:
	mutex_unlock(&dev->struct_mutex);
err_rpm:
	intel_runtime_pm_put(dev_priv);
	i915_gem_object_unpin_pages(obj);
err:
	switch (ret) {
	case -EIO:
		/*
		 * We eat errors when the gpu is terminally wedged to avoid
		 * userspace unduly crashing (gl has no provisions for mmaps to
		 * fail). But any other -EIO isn't ours (e.g. swap in failure)
		 * and so needs to be reported.
		 */
		if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
			ret = VM_FAULT_SIGBUS;
			break;
		}
	case -EAGAIN:
		/*
		 * EAGAIN means the gpu is hung and we'll wait for the error
		 * handler to reset everything when re-faulting in
		 * i915_mutex_lock_interruptible.
		 */
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
	case -EBUSY:
		/*
		 * EBUSY is ok: this just means that another thread
		 * already did the job.
		 */
		ret = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		ret = VM_FAULT_OOM;
		break;
	case -ENOSPC:
	case -EFAULT:
		ret = VM_FAULT_SIGBUS;
		break;
	default:
		WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
		ret = VM_FAULT_SIGBUS;
		break;
	}
	return ret;
}
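/*
 * Worked example for the partial-view path above, assuming 4KiB pages:
 * MIN_CHUNK_PAGES corresponds to 1MiB, so any object larger than 2MiB is
 * first pinned with PIN_NONBLOCK | PIN_NONFAULT. If that full pin fails,
 * the fault is instead serviced through a 1MiB partial view (rounded up
 * to a whole tile row for tiled objects) starting at the faulting page
 * offset rounded down to the chunk size.
 */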

/**
 * i915_gem_release_mmap - remove physical page mappings
 * @obj: obj in question
 *
 * Preserve the reservation of the mmapping with the DRM core code, but
 * relinquish ownership of the pages back to the system.
 *
 * It is vital that we remove the page mapping if we have mapped a tiled
 * object through the GTT and then lose the fence register due to
 * resource pressure. Similarly if the object has been moved out of the
 * aperture, then pages mapped into userspace must be revoked. Removing the
 * mapping will then trigger a page fault on the next user access, allowing
 * fixup by i915_gem_fault().
 */
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* Serialisation between user GTT access and our code depends upon
	 * revoking the CPU's PTE whilst the mutex is held. The next user
	 * pagefault then has to wait until we release the mutex.
	 *
	 * Note that RPM complicates somewhat by adding an additional
	 * requirement that operations to the GGTT be made holding the RPM
	 * wakeref.
	 */
	lockdep_assert_held(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);

	if (list_empty(&obj->userfault_link))
		goto out;

	list_del_init(&obj->userfault_link);
	drm_vma_node_unmap(&obj->base.vma_node,
			   obj->base.dev->anon_inode->i_mapping);

	/* Ensure that the CPU's PTE are revoked and there are not outstanding
	 * memory transactions from userspace before we return. The TLB
	 * flushing implied above by changing the PTE above *should* be
	 * sufficient, an extra barrier here just provides us with a bit
	 * of paranoid documentation about our requirement to serialise
	 * memory writes before touching registers / GSM.
	 */
	wmb();

out:
	intel_runtime_pm_put(i915);
}

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.userfault_list, userfault_link) {
		list_del_init(&obj->userfault_link);
		drm_vma_node_unmap(&obj->base.vma_node,
				   obj->base.dev->anon_inode->i_mapping);
	}

	/* The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		if (WARN_ON(reg->pin_count))
			continue;

		if (!reg->vma)
			continue;

		GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
		reg->dirty = true;
	}
}

/**
 * i915_gem_get_ggtt_size - return required global GTT size for an object
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 *
 * Return the required global GTT size for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_size(struct drm_i915_private *dev_priv,
			   u64 size, int tiling_mode)
{
	u64 ggtt_size;

	GEM_BUG_ON(size == 0);

	if (INTEL_GEN(dev_priv) >= 4 ||
	    tiling_mode == I915_TILING_NONE)
		return size;

	/* Previous chips need a power-of-two fence region when tiling */
	if (IS_GEN3(dev_priv))
		ggtt_size = 1024*1024;
	else
		ggtt_size = 512*1024;

	while (ggtt_size < size)
		ggtt_size <<= 1;

	return ggtt_size;
}
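/*
 * Worked example for i915_gem_get_ggtt_size() above: on gen3 a 700KiB
 * tiled object needs a 1MiB fence region (the 1MiB minimum already covers
 * it), while on gen2 the same object is rounded up from the 512KiB minimum
 * to 1MiB. From gen4 onwards the object size is returned unchanged.
 */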

/**
 * i915_gem_get_ggtt_alignment - return required global GTT alignment
 * @dev_priv: i915 device
 * @size: object size
 * @tiling_mode: tiling mode
 * @fenced: is fenced alignment required or not
 *
 * Return the required global GTT alignment for an object, taking into account
 * potential fence register mapping.
 */
u64 i915_gem_get_ggtt_alignment(struct drm_i915_private *dev_priv, u64 size,
				int tiling_mode, bool fenced)
{
	GEM_BUG_ON(size == 0);

	/*
	 * Minimum alignment is 4k (GTT page size), but might be greater
	 * if a fence register is needed for the object.
	 */
	if (INTEL_GEN(dev_priv) >= 4 ||
	    (!fenced && (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))) ||
	    tiling_mode == I915_TILING_NONE)
		return 4096;

	/*
	 * Previous chips need to be aligned to the size of the smallest
	 * fence register that can contain the object.
	 */
	return i915_gem_get_ggtt_size(dev_priv, size, tiling_mode);
}

static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	int err;

	err = drm_gem_create_mmap_offset(&obj->base);
	if (!err)
		return 0;

	/* We can idle the GPU locklessly to flush stale objects, but in order
	 * to claim that space for ourselves, we need to take the big
	 * struct_mutex to free the requests+objects and allocate our slot.
	 */
	err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
	if (err)
		return err;

	err = i915_mutex_lock_interruptible(&dev_priv->drm);
	if (!err) {
		i915_gem_retire_requests(dev_priv);
		err = drm_gem_create_mmap_offset(&obj->base);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	return err;
}

static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
{
	drm_gem_free_mmap_offset(&obj->base);
}

int
i915_gem_mmap_gtt(struct drm_file *file,
		  struct drm_device *dev,
		  uint32_t handle,
		  uint64_t *offset)
{
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	ret = i915_gem_object_create_mmap_offset(obj);
	if (ret == 0)
		*offset = drm_vma_node_offset_addr(&obj->base.vma_node);

	i915_gem_object_put(obj);
	return ret;
}

/**
 * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
 * @dev: DRM device
 * @data: GTT mapping ioctl data
 * @file: GEM object info
 *
 * Simply returns the fake offset to userspace so it can mmap it.
 * The mmap call will end up in drm_gem_mmap(), which will set things
 * up so we can get faults in the handler above.
 *
 * The fault handler will take care of binding the object into the GTT
 * (since it may have been evicted to make room for something), allocating
 * a fence register, and mapping the appropriate aperture address into
 * userspace.
 */
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file)
{
	struct drm_i915_gem_mmap_gtt *args = data;

	return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
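/*
 * Illustrative userspace flow (a sketch, not part of the kernel build):
 * the fake offset returned by the ioctl above is fed straight into mmap()
 * on the DRM fd, e.g.
 *
 *	struct drm_i915_gem_mmap_gtt arg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &arg);
 *	ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, arg.offset);
 *
 * The resulting faults are then serviced by i915_gem_fault() above.
 * Structure and ioctl names follow the i915 uapi of this era; treat the
 * snippet as a sketch rather than a verified example.
 */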

/* Immediately discard the backing storage */
static void
i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
	i915_gem_object_free_mmap_offset(obj);

	if (obj->base.filp == NULL)
		return;

	/* Our goal here is to return as much of the memory as
	 * is possible back to the system as we are called from OOM.
	 * To do this we must instruct the shmfs to drop all of its
	 * backing pages, *now*.
	 */
	shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
	obj->mm.madv = __I915_MADV_PURGED;
}

/* Try to discard unwanted pages */
void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping;

	lockdep_assert_held(&obj->mm.lock);
	GEM_BUG_ON(obj->mm.pages);

	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
	case __I915_MADV_PURGED:
		return;
	}

	if (obj->base.filp == NULL)
		return;

	mapping = obj->base.filp->f_mapping;
	invalidate_mapping_pages(mapping, 0, (loff_t)-1);
}

2216
static void
2217 2218
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
			      struct sg_table *pages)
2219
{
2220 2221
	struct sgt_iter sgt_iter;
	struct page *page;
2222

2223
	__i915_gem_object_release_shmem(obj, pages);
2224

2225
	i915_gem_gtt_finish_pages(obj, pages);
I
Imre Deak 已提交
2226

2227
	if (i915_gem_object_needs_bit17_swizzle(obj))
2228
		i915_gem_object_save_bit_17_swizzle(obj, pages);
2229

2230
	for_each_sgt_page(page, sgt_iter, pages) {
C
Chris Wilson 已提交
2231
		if (obj->mm.dirty)
2232
			set_page_dirty(page);
2233

C
Chris Wilson 已提交
2234
		if (obj->mm.madv == I915_MADV_WILLNEED)
2235
			mark_page_accessed(page);
2236

2237
		put_page(page);
2238
	}
C
Chris Wilson 已提交
2239
	obj->mm.dirty = false;
2240

2241 2242
	sg_free_table(pages);
	kfree(pages);
2243
}
C
Chris Wilson 已提交
2244

2245 2246 2247 2248 2249
static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
{
	struct radix_tree_iter iter;
	void **slot;

C
Chris Wilson 已提交
2250 2251
	radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
		radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2252 2253
}

2254 2255
void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
				 enum i915_mm_subclass subclass)
2256
{
2257
	struct sg_table *pages;
2258

C
Chris Wilson 已提交
2259
	if (i915_gem_object_has_pinned_pages(obj))
2260
		return;
2261

2262
	GEM_BUG_ON(obj->bind_count);
2263 2264 2265 2266
	if (!READ_ONCE(obj->mm.pages))
		return;

	/* May be called by shrinker from within get_pages() (on another bo) */
2267
	mutex_lock_nested(&obj->mm.lock, subclass);
2268 2269
	if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
		goto unlock;
B
Ben Widawsky 已提交
2270

2271 2272 2273
	/* ->put_pages might need to allocate memory for the bit17 swizzle
	 * array, hence protect them from being reaped by removing them from gtt
	 * lists early. */
2274 2275
	pages = fetch_and_zero(&obj->mm.pages);
	GEM_BUG_ON(!pages);
2276

C
Chris Wilson 已提交
2277
	if (obj->mm.mapping) {
2278 2279
		void *ptr;

C
Chris Wilson 已提交
2280
		ptr = ptr_mask_bits(obj->mm.mapping);
2281 2282
		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
2283
		else
2284 2285
			kunmap(kmap_to_page(ptr));

C
Chris Wilson 已提交
2286
		obj->mm.mapping = NULL;
2287 2288
	}

2289 2290
	__i915_gem_object_reset_page_iter(obj);

2291
	obj->ops->put_pages(obj, pages);
2292 2293
unlock:
	mutex_unlock(&obj->mm.lock);
}

static unsigned int swiotlb_max_size(void)
{
#if IS_ENABLED(CONFIG_SWIOTLB)
	return rounddown(swiotlb_nr_tbl() << IO_TLB_SHIFT, PAGE_SIZE);
#else
	return 0;
#endif
}
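/* i915_sg_trim() below repacks a scatterlist whose entries were coalesced
 * while it was being filled: a table allocated with one entry per page but
 * populated with only a few contiguous runs is reallocated to exactly
 * orig_st->nents entries, returning the unused tail to the allocator. It
 * runs before DMA mapping, so no dma_* fields need to be copied.
 */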

static void i915_sg_trim(struct sg_table *orig_st)
{
	struct sg_table new_st;
	struct scatterlist *sg, *new_sg;
	unsigned int i;

	if (orig_st->nents == orig_st->orig_nents)
		return;

	if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL))
		return;

	new_sg = new_st.sgl;
	for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
		sg_set_page(new_sg, sg_page(sg), sg->length, 0);
		/* called before being DMA mapped, no need to copy sg->dma_* */
		new_sg = sg_next(new_sg);
	}

	sg_free_table(orig_st);

	*orig_st = new_st;
}

static struct sg_table *
C
Chris Wilson 已提交
2330
i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2331
{
2332
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2333 2334
	int page_count, i;
	struct address_space *mapping;
2335 2336
	struct sg_table *st;
	struct scatterlist *sg;
2337
	struct sgt_iter sgt_iter;
2338
	struct page *page;
2339
	unsigned long last_pfn = 0;	/* suppress gcc warning */
2340
	unsigned int max_segment;
I
Imre Deak 已提交
2341
	int ret;
C
Chris Wilson 已提交
2342
	gfp_t gfp;
2343

C
Chris Wilson 已提交
2344 2345 2346 2347
	/* Assert that the object is not currently in any GPU domain. As it
	 * wasn't in the GTT, there shouldn't be any way it could have been in
	 * a GPU cache
	 */
2348 2349
	GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
	GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
C
Chris Wilson 已提交
2350

2351 2352
	max_segment = swiotlb_max_size();
	if (!max_segment)
2353
		max_segment = rounddown(UINT_MAX, PAGE_SIZE);
2354

2355 2356
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
2357
		return ERR_PTR(-ENOMEM);
2358

2359
	page_count = obj->base.size / PAGE_SIZE;
2360 2361
	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
		kfree(st);
2362
		return ERR_PTR(-ENOMEM);
2363
	}
2364

2365 2366 2367 2368 2369
	/* Get the list of pages out of our struct file.  They'll be pinned
	 * at this point until we release them.
	 *
	 * Fail silently without starting the shrinker
	 */
2370
	mapping = obj->base.filp->f_mapping;
2371
	gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM));
2372
	gfp |= __GFP_NORETRY | __GFP_NOWARN;
2373 2374 2375
	sg = st->sgl;
	st->nents = 0;
	for (i = 0; i < page_count; i++) {
C
Chris Wilson 已提交
2376 2377
		page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		if (IS_ERR(page)) {
2378 2379 2380 2381 2382
			i915_gem_shrink(dev_priv,
					page_count,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND |
					I915_SHRINK_PURGEABLE);
C
Chris Wilson 已提交
2383 2384 2385 2386 2387 2388 2389
			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
		}
		if (IS_ERR(page)) {
			/* We've tried hard to allocate the memory by reaping
			 * our own buffer, now let the real VM do its job and
			 * go down in flames if truly OOM.
			 */
2390
			page = shmem_read_mapping_page(mapping, i);
I
Imre Deak 已提交
2391 2392
			if (IS_ERR(page)) {
				ret = PTR_ERR(page);
2393
				goto err_sg;
I
Imre Deak 已提交
2394
			}
C
Chris Wilson 已提交
2395
		}
2396 2397 2398
		if (!i ||
		    sg->length >= max_segment ||
		    page_to_pfn(page) != last_pfn + 1) {
2399 2400 2401 2402 2403 2404 2405 2406
			if (i)
				sg = sg_next(sg);
			st->nents++;
			sg_set_page(sg, page, PAGE_SIZE, 0);
		} else {
			sg->length += PAGE_SIZE;
		}
		last_pfn = page_to_pfn(page);
2407 2408 2409

		/* Check that the i965g/gm workaround works. */
		WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2410
	}
2411
	if (sg) /* loop terminated early; short sg table */
2412
		sg_mark_end(sg);
2413

2414 2415 2416
	/* Trim unused sg entries to avoid wasting memory. */
	i915_sg_trim(st);

2417
	ret = i915_gem_gtt_prepare_pages(obj, st);
I
Imre Deak 已提交
2418 2419 2420
	if (ret)
		goto err_pages;

2421
	if (i915_gem_object_needs_bit17_swizzle(obj))
2422
		i915_gem_object_do_bit_17_swizzle(obj, st);
2423

2424
	return st;
2425

2426
err_sg:
2427
	sg_mark_end(sg);
2428
err_pages:
2429 2430
	for_each_sgt_page(page, sgt_iter, st)
		put_page(page);
2431 2432
	sg_free_table(st);
	kfree(st);
2433 2434 2435 2436 2437 2438 2439 2440 2441

	/* shmemfs first checks if there is enough memory to allocate the page
	 * and reports ENOSPC should there be insufficient, along with the usual
	 * ENOMEM for a genuine allocation failure.
	 *
	 * We use ENOSPC in our driver to mean that we have run out of aperture
	 * space and so want to translate the error from shmemfs back to our
	 * usual understanding of ENOMEM.
	 */
I
Imre Deak 已提交
2442 2443 2444
	if (ret == -ENOSPC)
		ret = -ENOMEM;

2445 2446 2447 2448 2449 2450
	return ERR_PTR(ret);
}

void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
				 struct sg_table *pages)
{
2451
	lockdep_assert_held(&obj->mm.lock);
2452 2453 2454 2455 2456

	obj->mm.get_page.sg_pos = pages->sgl;
	obj->mm.get_page.sg_idx = 0;

	obj->mm.pages = pages;
2457 2458 2459 2460 2461 2462 2463

	if (i915_gem_object_is_tiled(obj) &&
	    to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		GEM_BUG_ON(obj->mm.quirked);
		__i915_gem_object_pin_pages(obj);
		obj->mm.quirked = true;
	}
2464 2465 2466 2467 2468 2469
}

static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;

2470 2471
	GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));

2472 2473 2474 2475 2476 2477 2478 2479 2480 2481 2482
	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		DRM_DEBUG("Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	pages = obj->ops->get_pages(obj);
	if (unlikely(IS_ERR(pages)))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages);
	return 0;
2483 2484
}

2485
/* Ensure that the associated pages are gathered from the backing storage
2486
 * and pinned into our object. i915_gem_object_pin_pages() may be called
2487
 * multiple times before they are released by a single call to
2488
 * i915_gem_object_unpin_pages() - once the pages are no longer referenced
2489 2490 2491
 * either as a result of memory pressure (reaping pages under the shrinker)
 * or as the object is itself released.
 */
C
Chris Wilson 已提交
2492
int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2493
{
2494
	int err;
2495

2496 2497 2498
	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		return err;
2499

2500 2501 2502 2503
	if (unlikely(!obj->mm.pages)) {
		err = ____i915_gem_object_get_pages(obj);
		if (err)
			goto unlock;
2504

2505 2506 2507
		smp_mb__before_atomic();
	}
	atomic_inc(&obj->mm.pages_pin_count);
2508

2509 2510
unlock:
	mutex_unlock(&obj->mm.lock);
2511
	return err;
2512 2513
}

2514
/* The 'mapping' part of i915_gem_object_pin_map() below */
2515 2516
static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
				 enum i915_map_type type)
2517 2518
{
	unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
C
Chris Wilson 已提交
2519
	struct sg_table *sgt = obj->mm.pages;
2520 2521
	struct sgt_iter sgt_iter;
	struct page *page;
2522 2523
	struct page *stack_pages[32];
	struct page **pages = stack_pages;
2524
	unsigned long i = 0;
2525
	pgprot_t pgprot;
2526 2527 2528
	void *addr;

	/* A single page can always be kmapped */
2529
	if (n_pages == 1 && type == I915_MAP_WB)
2530 2531
		return kmap(sg_page(sgt->sgl));

2532 2533 2534 2535 2536 2537
	if (n_pages > ARRAY_SIZE(stack_pages)) {
		/* Too big for stack -- allocate temporary array instead */
		pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
		if (!pages)
			return NULL;
	}
2538

2539 2540
	for_each_sgt_page(page, sgt_iter, sgt)
		pages[i++] = page;
2541 2542 2543 2544

	/* Check that we have the expected number of pages */
	GEM_BUG_ON(i != n_pages);

2545 2546 2547 2548 2549 2550 2551 2552 2553
	switch (type) {
	case I915_MAP_WB:
		pgprot = PAGE_KERNEL;
		break;
	case I915_MAP_WC:
		pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
		break;
	}
	addr = vmap(pages, n_pages, 0, pgprot);
2554

2555 2556
	if (pages != stack_pages)
		drm_free_large(pages);
2557 2558 2559 2560 2561

	return addr;
}

/* get, pin, and map the pages of the object into kernel space */
2562 2563
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
			      enum i915_map_type type)
2564
{
2565 2566 2567
	enum i915_map_type has_type;
	bool pinned;
	void *ptr;
2568 2569
	int ret;

2570
	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
2571

2572
	ret = mutex_lock_interruptible(&obj->mm.lock);
2573 2574 2575
	if (ret)
		return ERR_PTR(ret);

2576 2577
	pinned = true;
	if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2578 2579 2580 2581
		if (unlikely(!obj->mm.pages)) {
			ret = ____i915_gem_object_get_pages(obj);
			if (ret)
				goto err_unlock;
2582

2583 2584 2585
			smp_mb__before_atomic();
		}
		atomic_inc(&obj->mm.pages_pin_count);
2586 2587 2588
		pinned = false;
	}
	GEM_BUG_ON(!obj->mm.pages);
2589

C
Chris Wilson 已提交
2590
	ptr = ptr_unpack_bits(obj->mm.mapping, has_type);
2591 2592 2593
	if (ptr && has_type != type) {
		if (pinned) {
			ret = -EBUSY;
2594
			goto err_unpin;
2595
		}
2596 2597 2598 2599 2600 2601

		if (is_vmalloc_addr(ptr))
			vunmap(ptr);
		else
			kunmap(kmap_to_page(ptr));

C
Chris Wilson 已提交
2602
		ptr = obj->mm.mapping = NULL;
2603 2604
	}

2605 2606 2607 2608
	if (!ptr) {
		ptr = i915_gem_object_map(obj, type);
		if (!ptr) {
			ret = -ENOMEM;
2609
			goto err_unpin;
2610 2611
		}

C
Chris Wilson 已提交
2612
		obj->mm.mapping = ptr_pack_bits(ptr, type);
2613 2614
	}

2615 2616
out_unlock:
	mutex_unlock(&obj->mm.lock);
2617 2618
	return ptr;

2619 2620 2621 2622 2623
err_unpin:
	atomic_dec(&obj->mm.pages_pin_count);
err_unlock:
	ptr = ERR_PTR(ret);
	goto out_unlock;
2624 2625
}

2626
static bool i915_context_is_banned(const struct i915_gem_context *ctx)
2627
{
2628
	if (ctx->banned)
2629 2630
		return true;

2631
	if (!ctx->bannable)
2632 2633
		return false;

2634
	if (ctx->ban_score >= CONTEXT_SCORE_BAN_THRESHOLD) {
2635 2636 2637 2638
		DRM_DEBUG("context hanging too often, banning!\n");
		return true;
	}

2639 2640 2641
	return false;
}

2642
static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
2643
{
2644
	ctx->ban_score += CONTEXT_SCORE_GUILTY;
2645

2646 2647
	ctx->banned = i915_context_is_banned(ctx);
	ctx->guilty_count++;
2648 2649

	DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
2650 2651
			 ctx->name, ctx->ban_score,
			 yesno(ctx->banned));
2652

2653
	if (!ctx->banned || IS_ERR_OR_NULL(ctx->file_priv))
2654 2655
		return;

2656 2657 2658
	ctx->file_priv->context_bans++;
	DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
			 ctx->name, ctx->file_priv->context_bans);
2659 2660 2661 2662
}

static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
{
2663
	ctx->active_count++;
2664 2665
}

2666
struct drm_i915_gem_request *
2667
i915_gem_find_active_request(struct intel_engine_cs *engine)
2668
{
2669 2670
	struct drm_i915_gem_request *request;

2671 2672 2673 2674 2675 2676 2677 2678
	/* We are called by the error capture and reset at a random
	 * point in time. In particular, note that neither is crucially
	 * ordered with an interrupt. After a hang, the GPU is dead and we
	 * assume that no more writes can happen (we waited long enough for
	 * all writes that were in transaction to be flushed) - adding an
	 * extra delay for a recent interrupt is pointless. Hence, we do
	 * not need an engine->irq_seqno_barrier() before the seqno reads.
	 */
2679
	list_for_each_entry(request, &engine->timeline->requests, link) {
C
Chris Wilson 已提交
2680
		if (__i915_gem_request_completed(request))
2681
			continue;
2682

2683
		return request;
2684
	}
2685 2686 2687 2688

	return NULL;
}

2689 2690 2691 2692 2693 2694 2695 2696 2697 2698 2699 2700 2701 2702 2703 2704 2705 2706
static void reset_request(struct drm_i915_gem_request *request)
{
	void *vaddr = request->ring->vaddr;
	u32 head;

	/* As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = request->head;
	if (request->postfix < head) {
		memset(vaddr + head, 0, request->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, request->postfix - head);
}

static void i915_gem_reset_engine(struct intel_engine_cs *engine)
2707 2708
{
	struct drm_i915_gem_request *request;
2709
	struct i915_gem_context *incomplete_ctx;
C
Chris Wilson 已提交
2710
	struct intel_timeline *timeline;
2711 2712
	bool ring_hung;

2713 2714 2715
	if (engine->irq_seqno_barrier)
		engine->irq_seqno_barrier(engine);

2716
	request = i915_gem_find_active_request(engine);
2717
	if (!request)
2718 2719
		return;

2720 2721 2722 2723 2724
	ring_hung = engine->hangcheck.stalled;
	if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
		DRM_DEBUG_DRIVER("%s pardoned, was guilty? %s\n",
				 engine->name,
				 yesno(ring_hung));
2725
		ring_hung = false;
2726
	}
2727

2728 2729 2730 2731 2732
	if (ring_hung)
		i915_gem_context_mark_guilty(request->ctx);
	else
		i915_gem_context_mark_innocent(request->ctx);

2733 2734 2735 2736
	if (!ring_hung)
		return;

	DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
2737
			 engine->name, request->global_seqno);
2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2748 2749 2750 2751 2752 2753

	/* Setup the CS to resume from the breadcrumb of the hung request */
	engine->reset_hw(engine, request);

	/* Users of the default context do not rely on logical state
	 * preserved between batches. They have to emit full state on
	 * every batch and so it is safe to execute queued requests following
	 * the hang.
	 *
	 * Other contexts preserve state, now corrupt. We want to skip all
	 * queued requests that reference the corrupt context.
	 */
	incomplete_ctx = request->ctx;
	if (i915_gem_context_is_default(incomplete_ctx))
		return;

2754
	list_for_each_entry_continue(request, &engine->timeline->requests, link)
2755 2756
		if (request->ctx == incomplete_ctx)
			reset_request(request);
C
Chris Wilson 已提交
2757 2758 2759 2760

	timeline = i915_gem_context_lookup_timeline(incomplete_ctx, engine);
	list_for_each_entry(request, &timeline->requests, link)
		reset_request(request);
2761
}
2762

2763
void i915_gem_reset(struct drm_i915_private *dev_priv)
2764
{
2765
	struct intel_engine_cs *engine;
2766
	enum intel_engine_id id;
2767

2768 2769
	lockdep_assert_held(&dev_priv->drm.struct_mutex);

2770 2771
	i915_gem_retire_requests(dev_priv);

2772
	for_each_engine(engine, dev_priv, id)
2773 2774
		i915_gem_reset_engine(engine);

2775
	i915_gem_restore_fences(dev_priv);
2776 2777 2778 2779 2780 2781 2782

	if (dev_priv->gt.awake) {
		intel_sanitize_gt_powersave(dev_priv);
		intel_enable_gt_powersave(dev_priv);
		if (INTEL_GEN(dev_priv) >= 6)
			gen6_rps_busy(dev_priv);
	}
2783 2784 2785 2786
}

static void nop_submit_request(struct drm_i915_gem_request *request)
{
2787 2788
	i915_gem_request_submit(request);
	intel_engine_init_global_seqno(request->engine, request->global_seqno);
2789 2790 2791 2792
}

static void i915_gem_cleanup_engine(struct intel_engine_cs *engine)
{
2793 2794 2795 2796 2797 2798
	/* We need to be sure that no thread is running the old callback as
	 * we install the nop handler (otherwise we would submit a request
	 * to hardware that will never complete). In order to prevent this
	 * race, we wait until the machine is idle before making the swap
	 * (using stop_machine()).
	 */
2799
	engine->submit_request = nop_submit_request;
2800

2801 2802 2803 2804
	/* Mark all pending requests as complete so that any concurrent
	 * (lockless) lookup doesn't try and wait upon the request as we
	 * reset it.
	 */
2805
	intel_engine_init_global_seqno(engine,
2806
				       intel_engine_last_submit(engine));
2807

2808 2809 2810 2811 2812 2813
	/*
	 * Clear the execlists queue up before freeing the requests, as those
	 * are the ones that keep the context and ringbuffer backing objects
	 * pinned in place.
	 */

2814
	if (i915.enable_execlists) {
2815 2816 2817 2818
		unsigned long flags;

		spin_lock_irqsave(&engine->timeline->lock, flags);

2819 2820 2821
		i915_gem_request_put(engine->execlist_port[0].request);
		i915_gem_request_put(engine->execlist_port[1].request);
		memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
2822 2823
		engine->execlist_queue = RB_ROOT;
		engine->execlist_first = NULL;
2824 2825

		spin_unlock_irqrestore(&engine->timeline->lock, flags);
2826
	}
2827 2828
}

2829
static int __i915_gem_set_wedged_BKL(void *data)
2830
{
2831
	struct drm_i915_private *i915 = data;
2832
	struct intel_engine_cs *engine;
2833
	enum intel_engine_id id;
2834

2835 2836 2837 2838 2839 2840 2841 2842
	for_each_engine(engine, i915, id)
		i915_gem_cleanup_engine(engine);

	return 0;
}

void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
{
2843 2844
	lockdep_assert_held(&dev_priv->drm.struct_mutex);
	set_bit(I915_WEDGED, &dev_priv->gpu_error.flags);
2845

2846
	stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
2847

2848
	i915_gem_context_lost(dev_priv);
2849
	i915_gem_retire_requests(dev_priv);
2850 2851

	mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
2852 2853
}

2854
static void
2855 2856
i915_gem_retire_work_handler(struct work_struct *work)
{
2857
	struct drm_i915_private *dev_priv =
2858
		container_of(work, typeof(*dev_priv), gt.retire_work.work);
2859
	struct drm_device *dev = &dev_priv->drm;
2860

2861
	/* Come back later if the device is busy... */
2862
	if (mutex_trylock(&dev->struct_mutex)) {
2863
		i915_gem_retire_requests(dev_priv);
2864
		mutex_unlock(&dev->struct_mutex);
2865
	}
2866 2867 2868 2869 2870

	/* Keep the retire handler running until we are finally idle.
	 * We do not need to do this test under locking as in the worst-case
	 * we queue the retire worker once too often.
	 */
2871 2872
	if (READ_ONCE(dev_priv->gt.awake)) {
		i915_queue_hangcheck(dev_priv);
2873 2874
		queue_delayed_work(dev_priv->wq,
				   &dev_priv->gt.retire_work,
2875
				   round_jiffies_up_relative(HZ));
2876
	}
2877
}
2878

2879 2880 2881 2882
static void
i915_gem_idle_work_handler(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
2883
		container_of(work, typeof(*dev_priv), gt.idle_work.work);
2884
	struct drm_device *dev = &dev_priv->drm;
2885
	struct intel_engine_cs *engine;
2886
	enum intel_engine_id id;
2887 2888 2889 2890 2891
	bool rearm_hangcheck;

	if (!READ_ONCE(dev_priv->gt.awake))
		return;

2892 2893 2894 2895 2896 2897 2898
	/*
	 * Wait for last execlists context complete, but bail out in case a
	 * new request is submitted.
	 */
	wait_for(READ_ONCE(dev_priv->gt.active_requests) ||
		 intel_execlists_idle(dev_priv), 10);

2899
	if (READ_ONCE(dev_priv->gt.active_requests))
2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912
		return;

	rearm_hangcheck =
		cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);

	if (!mutex_trylock(&dev->struct_mutex)) {
		/* Currently busy, come back later */
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->gt.idle_work,
				 msecs_to_jiffies(50));
		goto out_rearm;
	}

2913 2914 2915 2916 2917 2918 2919
	/*
	 * New request retired after this work handler started, extend active
	 * period until next instance of the work.
	 */
	if (work_pending(work))
		goto out_unlock;

2920
	if (dev_priv->gt.active_requests)
2921
		goto out_unlock;
2922

2923 2924 2925
	if (wait_for(intel_execlists_idle(dev_priv), 10))
		DRM_ERROR("Timeout waiting for engines to idle\n");

2926
	for_each_engine(engine, dev_priv, id)
2927
		i915_gem_batch_pool_fini(&engine->batch_pool);
2928

2929 2930 2931
	GEM_BUG_ON(!dev_priv->gt.awake);
	dev_priv->gt.awake = false;
	rearm_hangcheck = false;
2932

2933 2934 2935 2936 2937
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_idle(dev_priv);
	intel_runtime_pm_put(dev_priv);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
2938

2939 2940 2941 2942
out_rearm:
	if (rearm_hangcheck) {
		GEM_BUG_ON(!dev_priv->gt.awake);
		i915_queue_hangcheck(dev_priv);
2943
	}
2944 2945
}

2946 2947 2948 2949 2950 2951 2952 2953 2954 2955
void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem);
	struct drm_i915_file_private *fpriv = file->driver_priv;
	struct i915_vma *vma, *vn;

	mutex_lock(&obj->base.dev->struct_mutex);
	list_for_each_entry_safe(vma, vn, &obj->vma_list, obj_link)
		if (vma->vm->file == fpriv)
			i915_vma_close(vma);
2956 2957 2958 2959 2960 2961

	if (i915_gem_object_is_active(obj) &&
	    !i915_gem_object_has_active_reference(obj)) {
		i915_gem_object_set_active_reference(obj);
		i915_gem_object_get(obj);
	}
2962 2963 2964
	mutex_unlock(&obj->base.dev->struct_mutex);
}

2965 2966 2967 2968 2969 2970 2971 2972 2973 2974 2975
static unsigned long to_wait_timeout(s64 timeout_ns)
{
	if (timeout_ns < 0)
		return MAX_SCHEDULE_TIMEOUT;

	if (timeout_ns == 0)
		return 0;

	return nsecs_to_jiffies_timeout(timeout_ns);
}

2976 2977
/**
 * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
2978 2979 2980
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
2981 2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996 2997 2998 2999 3000 3001 3002 3003 3004
 *
 * Returns 0 if successful, else an error is returned with the remaining time in
 * the timeout parameter.
 *  -ETIME: object is still busy after timeout
 *  -ERESTARTSYS: signal interrupted the wait
 *  -ENOENT: object doesn't exist
 * Also possible, but rare:
 *  -EAGAIN: GPU wedged
 *  -ENOMEM: damn
 *  -ENODEV: Internal IRQ fail
 *  -E?: The add request failed
 *
 * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
 * non-zero timeout parameter the wait ioctl will wait for the given number of
 * nanoseconds on an object becoming unbusy. Since the wait itself does so
 * without holding struct_mutex the object may become re-busied before this
 * function completes. A similar but shorter race condition exists in the
 * busy ioctl.
 */
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_i915_gem_wait *args = data;
	struct drm_i915_gem_object *obj;
3005 3006
	ktime_t start;
	long ret;
3007

3008 3009 3010
	if (args->flags != 0)
		return -EINVAL;

3011
	obj = i915_gem_object_lookup(file, args->bo_handle);
3012
	if (!obj)
3013 3014
		return -ENOENT;

3015 3016 3017 3018 3019 3020 3021 3022 3023 3024 3025
	start = ktime_get();

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
				   to_wait_timeout(args->timeout_ns),
				   to_rps_client(file));

	if (args->timeout_ns > 0) {
		args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
		if (args->timeout_ns < 0)
			args->timeout_ns = 0;
3026 3027
	}

C
Chris Wilson 已提交
3028
	i915_gem_object_put(obj);
3029
	return ret;
3030 3031
}

3032
static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
3033
{
3034
	int ret, i;
3035

3036 3037 3038 3039 3040
	for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
		ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
		if (ret)
			return ret;
	}
3041

3042 3043 3044 3045 3046 3047 3048
	return 0;
}

int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
{
	int ret;

3049 3050 3051 3052 3053 3054 3055 3056 3057 3058 3059 3060
	if (flags & I915_WAIT_LOCKED) {
		struct i915_gem_timeline *tl;

		lockdep_assert_held(&i915->drm.struct_mutex);

		list_for_each_entry(tl, &i915->gt.timelines, link) {
			ret = wait_for_timeline(tl, flags);
			if (ret)
				return ret;
		}
	} else {
		ret = wait_for_timeline(&i915->gt.global_timeline, flags);
3061 3062 3063
		if (ret)
			return ret;
	}
3064

3065
	return 0;
3066 3067
}

3068 3069
void i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     bool force)
3070 3071 3072 3073 3074
{
	/* If we don't have a page list set up, then we're not pinned
	 * to GPU, and we can ignore the cache flush because it'll happen
	 * again at bind time.
	 */
C
Chris Wilson 已提交
3075
	if (!obj->mm.pages)
3076
		return;
3077

3078 3079 3080 3081
	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 */
3082
	if (obj->stolen || obj->phys_handle)
3083
		return;
3084

3085 3086 3087 3088 3089 3090 3091 3092
	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines.  However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated.  As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
3093 3094
	if (!force && cpu_cache_is_coherent(obj->base.dev, obj->cache_level)) {
		obj->cache_dirty = true;
3095
		return;
3096
	}
3097

C
Chris Wilson 已提交
3098
	trace_i915_gem_object_clflush(obj);
C
Chris Wilson 已提交
3099
	drm_clflush_sg(obj->mm.pages);
3100
	obj->cache_dirty = false;
3101 3102 3103 3104
}

/** Flushes the GTT write domain for the object if it's dirty. */
static void
3105
i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
3106
{
3107
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
C
Chris Wilson 已提交
3108

3109
	if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
3110 3111
		return;

3112
	/* No actual flushing is required for the GTT write domain.  Writes
3113
	 * to it "immediately" go to main memory as far as we know, so there's
3114
	 * no chipset flush.  It also doesn't land in render cache.
3115 3116 3117 3118
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
3119 3120 3121 3122 3123 3124 3125
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour).
3126
	 */
3127
	wmb();
3128
	if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv))
3129
		POSTING_READ(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
3130

3131
	intel_fb_obj_flush(obj, false, write_origin(obj, I915_GEM_DOMAIN_GTT));
3132

3133
	obj->base.write_domain = 0;
C
Chris Wilson 已提交
3134
	trace_i915_gem_object_change_domain(obj,
3135
					    obj->base.read_domains,
3136
					    I915_GEM_DOMAIN_GTT);
3137 3138 3139 3140
}

/** Flushes the CPU write domain for the object if it's dirty. */
static void
3141
i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
3142
{
3143
	if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
3144 3145
		return;

3146
	i915_gem_clflush_object(obj, obj->pin_display);
3147
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
3148

3149
	obj->base.write_domain = 0;
C
Chris Wilson 已提交
3150
	trace_i915_gem_object_change_domain(obj,
3151
					    obj->base.read_domains,
3152
					    I915_GEM_DOMAIN_CPU);
3153 3154
}

3155 3156
/**
 * Moves a single object to the GTT read, and possibly write domain.
3157 3158
 * @obj: object to act on
 * @write: ask for write access or read only
3159 3160 3161 3162
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
J
Jesse Barnes 已提交
3163
int
3164
i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3165
{
C
Chris Wilson 已提交
3166
	uint32_t old_write_domain, old_read_domains;
3167
	int ret;
3168

3169
	lockdep_assert_held(&obj->base.dev->struct_mutex);
3170

3171 3172 3173 3174 3175 3176
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
3177 3178 3179
	if (ret)
		return ret;

3180 3181 3182
	if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
		return 0;

3183 3184 3185 3186 3187 3188 3189 3190
	/* Flush and acquire obj->pages so that we are coherent through
	 * direct access in memory with previous cached writes through
	 * shmemfs and that our cache domain tracking remains valid.
	 * For example, if the obj->filp was moved to swap without us
	 * being notified and releasing the pages, we would mistakenly
	 * continue to assume that the obj remained out of the CPU cached
	 * domain.
	 */
C
Chris Wilson 已提交
3191
	ret = i915_gem_object_pin_pages(obj);
3192 3193 3194
	if (ret)
		return ret;

3195
	i915_gem_object_flush_cpu_write_domain(obj);
C
Chris Wilson 已提交
3196

3197 3198 3199 3200 3201 3202 3203
	/* Serialise direct access to this object with the barriers for
	 * coherent writes from the GPU, by effectively invalidating the
	 * GTT domain upon first access.
	 */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
		mb();

3204 3205
	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;
C
Chris Wilson 已提交
3206

3207 3208 3209
	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
3210
	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3211
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3212
	if (write) {
3213 3214
		obj->base.read_domains = I915_GEM_DOMAIN_GTT;
		obj->base.write_domain = I915_GEM_DOMAIN_GTT;
C
Chris Wilson 已提交
3215
		obj->mm.dirty = true;
3216 3217
	}

C
Chris Wilson 已提交
3218 3219 3220 3221
	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

C
Chris Wilson 已提交
3222
	i915_gem_object_unpin_pages(obj);
3223 3224 3225
	return 0;
}

3226 3227
/**
 * Changes the cache-level of an object across all VMA.
3228 3229
 * @obj: object to act on
 * @cache_level: new cache level to set for the object
3230 3231 3232 3233 3234 3235 3236 3237 3238 3239 3240
 *
 * After this function returns, the object will be in the new cache-level
 * across all GTT and the contents of the backing storage will be coherent,
 * with respect to the new cache-level. In order to keep the backing storage
 * coherent for all users, we only allow a single cache level to be set
 * globally on the object and prevent it from being changed whilst the
 * hardware is reading from the object. That is if the object is currently
 * on the scanout it will be set to uncached (or equivalent display
 * cache coherency) and all non-MOCS GPU access will also be uncached so
 * that all direct access to the scanout remains coherent.
 */
3241 3242 3243
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
				    enum i915_cache_level cache_level)
{
3244
	struct i915_vma *vma;
3245
	int ret;
3246

3247 3248
	lockdep_assert_held(&obj->base.dev->struct_mutex);

3249
	if (obj->cache_level == cache_level)
3250
		return 0;
3251

3252 3253 3254 3255 3256
	/* Inspect the list of currently bound VMA and unbind any that would
	 * be invalid given the new cache-level. This is principally to
	 * catch the issue of the CS prefetch crossing page boundaries and
	 * reading an invalid PTE on older architectures.
	 */
3257 3258
restart:
	list_for_each_entry(vma, &obj->vma_list, obj_link) {
3259 3260 3261
		if (!drm_mm_node_allocated(&vma->node))
			continue;

3262
		if (i915_vma_is_pinned(vma)) {
3263 3264 3265 3266
			DRM_DEBUG("can not change the cache level of pinned objects\n");
			return -EBUSY;
		}

3267 3268 3269 3270 3271 3272 3273 3274 3275 3276 3277 3278
		if (i915_gem_valid_gtt_space(vma, cache_level))
			continue;

		ret = i915_vma_unbind(vma);
		if (ret)
			return ret;

		/* As unbinding may affect other elements in the
		 * obj->vma_list (due to side-effects from retiring
		 * an active vma), play safe and restart the iterator.
		 */
		goto restart;
3279 3280
	}

3281 3282 3283 3284 3285 3286 3287
	/* We can reuse the existing drm_mm nodes but need to change the
	 * cache-level on the PTE. We could simply unbind them all and
	 * rebind with the correct cache-level on next use. However since
	 * we already have a valid slot, dma mapping, pages etc, we may as well
	 * rewrite the PTE in the belief that doing so tramples upon less
	 * state and so involves less work.
	 */
3288
	if (obj->bind_count) {
3289 3290 3291 3292
		/* Before we change the PTE, the GPU must not be accessing it.
		 * If we wait upon the object, we know that all the bound
		 * VMA are no longer active.
		 */
3293 3294 3295 3296 3297 3298
		ret = i915_gem_object_wait(obj,
					   I915_WAIT_INTERRUPTIBLE |
					   I915_WAIT_LOCKED |
					   I915_WAIT_ALL,
					   MAX_SCHEDULE_TIMEOUT,
					   NULL);
3299 3300 3301
		if (ret)
			return ret;

3302 3303
		if (!HAS_LLC(to_i915(obj->base.dev)) &&
		    cache_level != I915_CACHE_NONE) {
3304 3305 3306 3307 3308 3309 3310 3311 3312 3313 3314 3315 3316 3317 3318 3319
			/* Access to snoopable pages through the GTT is
			 * incoherent and on some machines causes a hard
			 * lockup. Relinquish the CPU mmapping to force
			 * userspace to refault in the pages and we can
			 * then double check if the GTT mapping is still
			 * valid for that pointer access.
			 */
			i915_gem_release_mmap(obj);

			/* As we no longer need a fence for GTT access,
			 * we can relinquish it now (and so prevent having
			 * to steal a fence from someone else on the next
			 * fence request). Note GPU activity would have
			 * dropped the fence as all snoopable access is
			 * supposed to be linear.
			 */
3320 3321 3322 3323 3324
			list_for_each_entry(vma, &obj->vma_list, obj_link) {
				ret = i915_vma_put_fence(vma);
				if (ret)
					return ret;
			}
3325 3326 3327 3328 3329 3330 3331 3332
		} else {
			/* We either have incoherent backing store and
			 * so no GTT access or the architecture is fully
			 * coherent. In such cases, existing GTT mmaps
			 * ignore the cache bit in the PTE and we can
			 * rewrite it without confusing the GPU or having
			 * to force userspace to fault back in its mmaps.
			 */
3333 3334
		}

3335
		list_for_each_entry(vma, &obj->vma_list, obj_link) {
3336 3337 3338 3339 3340 3341 3342
			if (!drm_mm_node_allocated(&vma->node))
				continue;

			ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
			if (ret)
				return ret;
		}
3343 3344
	}

3345 3346 3347 3348
	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU &&
	    cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
		obj->cache_dirty = true;

3349
	list_for_each_entry(vma, &obj->vma_list, obj_link)
3350 3351 3352
		vma->node.color = cache_level;
	obj->cache_level = cache_level;

3353 3354 3355
	return 0;
}

B
Ben Widawsky 已提交
3356 3357
int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
3358
{
B
Ben Widawsky 已提交
3359
	struct drm_i915_gem_caching *args = data;
3360
	struct drm_i915_gem_object *obj;
3361
	int err = 0;
3362

3363 3364 3365 3366 3367 3368
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
	if (!obj) {
		err = -ENOENT;
		goto out;
	}
3369

3370 3371 3372 3373 3374 3375
	switch (obj->cache_level) {
	case I915_CACHE_LLC:
	case I915_CACHE_L3_LLC:
		args->caching = I915_CACHING_CACHED;
		break;

3376 3377 3378 3379
	case I915_CACHE_WT:
		args->caching = I915_CACHING_DISPLAY;
		break;

3380 3381 3382 3383
	default:
		args->caching = I915_CACHING_NONE;
		break;
	}
3384 3385 3386
out:
	rcu_read_unlock();
	return err;
3387 3388
}

B
Ben Widawsky 已提交
3389 3390
int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
			       struct drm_file *file)
3391
{
3392
	struct drm_i915_private *i915 = to_i915(dev);
B
Ben Widawsky 已提交
3393
	struct drm_i915_gem_caching *args = data;
3394 3395 3396 3397
	struct drm_i915_gem_object *obj;
	enum i915_cache_level level;
	int ret;

B
Ben Widawsky 已提交
3398 3399
	switch (args->caching) {
	case I915_CACHING_NONE:
3400 3401
		level = I915_CACHE_NONE;
		break;
B
Ben Widawsky 已提交
3402
	case I915_CACHING_CACHED:
3403 3404 3405 3406 3407 3408
		/*
		 * Due to a HW issue on BXT A stepping, GPU stores via a
		 * snooped mapping may leave stale data in a corresponding CPU
		 * cacheline, whereas normally such cachelines would get
		 * invalidated.
		 */
3409
		if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3410 3411
			return -ENODEV;

3412 3413
		level = I915_CACHE_LLC;
		break;
3414
	case I915_CACHING_DISPLAY:
3415
		level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3416
		break;
3417 3418 3419 3420
	default:
		return -EINVAL;
	}

B
Ben Widawsky 已提交
3421 3422
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
3423
		return ret;
B
Ben Widawsky 已提交
3424

3425 3426
	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj) {
3427 3428 3429 3430 3431
		ret = -ENOENT;
		goto unlock;
	}

	ret = i915_gem_object_set_cache_level(obj, level);
3432
	i915_gem_object_put(obj);
3433 3434 3435 3436 3437
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

3438
/*
3439 3440 3441
 * Prepare buffer for display plane (scanout, cursors, etc).
 * Can be called from an uninterruptible phase (modesetting) and allows
 * any flushes to be pipelined (for pageflips).
3442
 */
C
Chris Wilson 已提交
3443
struct i915_vma *
3444 3445
i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
				     u32 alignment,
3446
				     const struct i915_ggtt_view *view)
3447
{
C
Chris Wilson 已提交
3448
	struct i915_vma *vma;
3449
	u32 old_read_domains, old_write_domain;
3450 3451
	int ret;

3452 3453
	lockdep_assert_held(&obj->base.dev->struct_mutex);

3454 3455 3456
	/* Mark the pin_display early so that we account for the
	 * display coherency whilst setting up the cache domains.
	 */
3457
	obj->pin_display++;
3458

3459 3460 3461 3462 3463 3464 3465 3466 3467
	/* The display engine is not coherent with the LLC cache on gen6.  As
	 * a result, we make sure that the pinning that is about to occur is
	 * done with uncached PTEs. This is lowest common denominator for all
	 * chipsets.
	 *
	 * However for gen6+, we could do better by using the GFDT bit instead
	 * of uncaching, which would allow us to flush all the LLC-cached data
	 * with that bit in the PTE to main memory with just one PIPE_CONTROL.
	 */
3468
	ret = i915_gem_object_set_cache_level(obj,
3469 3470
					      HAS_WT(to_i915(obj->base.dev)) ?
					      I915_CACHE_WT : I915_CACHE_NONE);
C
Chris Wilson 已提交
3471 3472
	if (ret) {
		vma = ERR_PTR(ret);
3473
		goto err_unpin_display;
C
Chris Wilson 已提交
3474
	}
3475

3476 3477
	/* As the user may map the buffer once pinned in the display plane
	 * (e.g. libkms for the bootup splash), we have to ensure that we
3478 3479 3480 3481
	 * always use map_and_fenceable for all scanout buffers. However,
	 * it may simply be too big to fit into mappable, in which case
	 * put it anyway and hope that userspace can cope (but always first
	 * try to preserve the existing ABI).
3482
	 */
3483 3484 3485 3486
	vma = ERR_PTR(-ENOSPC);
	if (view->type == I915_GGTT_VIEW_NORMAL)
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
					       PIN_MAPPABLE | PIN_NONBLOCK);
3487 3488 3489 3490 3491 3492 3493 3494 3495 3496 3497 3498 3499 3500 3501 3502
	if (IS_ERR(vma)) {
		struct drm_i915_private *i915 = to_i915(obj->base.dev);
		unsigned int flags;

		/* Valleyview is definitely limited to scanning out the first
		 * 512MiB. Lets presume this behaviour was inherited from the
		 * g4x display engine and that all earlier gen are similarly
		 * limited. Testing suggests that it is a little more
		 * complicated than this. For example, Cherryview appears quite
		 * happy to scanout from anywhere within its global aperture.
		 */
		flags = 0;
		if (HAS_GMCH_DISPLAY(i915))
			flags = PIN_MAPPABLE;
		vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
	}
C
Chris Wilson 已提交
3503
	if (IS_ERR(vma))
3504
		goto err_unpin_display;
3505

3506 3507
	vma->display_alignment = max_t(u64, vma->display_alignment, alignment);

3508 3509 3510 3511 3512
	/* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
	if (obj->cache_dirty) {
		i915_gem_clflush_object(obj, true);
		intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
	}
3513

3514
	old_write_domain = obj->base.write_domain;
3515
	old_read_domains = obj->base.read_domains;
3516 3517 3518 3519

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
3520
	obj->base.write_domain = 0;
3521
	obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3522 3523 3524

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
3525
					    old_write_domain);
3526

C
Chris Wilson 已提交
3527
	return vma;
3528 3529

err_unpin_display:
3530
	obj->pin_display--;
C
Chris Wilson 已提交
3531
	return vma;
3532 3533 3534
}

void
i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
{
	lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);

	if (WARN_ON(vma->obj->pin_display == 0))
		return;

	if (--vma->obj->pin_display == 0)
		vma->display_alignment = 0;

	/* Bump the LRU to try and avoid premature eviction whilst flipping */
	if (!i915_vma_is_active(vma))
		list_move_tail(&vma->vm_link, &vma->vm->inactive_list);

	i915_vma_unpin(vma);
}

/**
 * Moves a single object to the CPU read, and possibly write domain.
 * @obj: object to act on
 * @write: requesting write or read-only access
 *
 * This function returns when the move is complete, including waiting on
 * flushes to occur.
 */
int
i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
	uint32_t old_write_domain, old_read_domains;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   (write ? I915_WAIT_ALL : 0),
				   MAX_SCHEDULE_TIMEOUT,
				   NULL);
	if (ret)
		return ret;

	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
		return 0;

	i915_gem_object_flush_gtt_write_domain(obj);

	old_write_domain = obj->base.write_domain;
	old_read_domains = obj->base.read_domains;

	/* Flush the CPU cache if it's still invalid. */
	if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
		i915_gem_clflush_object(obj, false);

		obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
	}

	/* It should now be out of any other write domains, and we can update
	 * the domain values for our changes.
	 */
	GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);

	/* If we're writing through the CPU, then the GPU read domains will
	 * need to be invalidated at next use.
	 */
	if (write) {
		obj->base.read_domains = I915_GEM_DOMAIN_CPU;
		obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	}

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);

	return 0;
}

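/*
 * Illustrative sketch (not part of the driver): preparing an object for CPU
 * writes with the helper above, under struct_mutex. "vaddr" is assumed to
 * come from a prior CPU mapping of the object; error handling is elided.
 *
 *	ret = i915_gem_object_set_to_cpu_domain(obj, true);
 *	if (ret == 0)
 *		memset(vaddr, 0, obj->base.size);
 *
 * After this the CPU cache has been flushed as required and the GPU read
 * domains will be invalidated on next use, as described above.
 */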
/* Throttle our rendering by waiting until the ring has completed our requests
 * emitted over 20 msec ago.
 *
 * Note that if we were to use the current jiffies each time around the loop,
 * we wouldn't escape the function with any frames outstanding if the time to
 * render a frame was over 20ms.
 *
 * This should get us reasonable parallelism between CPU and GPU but also
 * relatively low latency when blocking on a particular request to finish.
 */
static int
i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_file_private *file_priv = file->driver_priv;
	unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
	struct drm_i915_gem_request *request, *target = NULL;
	long ret;

	/* ABI: return -EIO if already wedged */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		return -EIO;

	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
		if (time_after_eq(request->emitted_jiffies, recent_enough))
			break;

		/*
		 * Note that the request might not have been submitted yet,
		 * in which case emitted_jiffies will be zero.
		 */
		if (!request->emitted_jiffies)
			continue;

		target = request;
	}
	if (target)
		i915_gem_request_get(target);
	spin_unlock(&file_priv->mm.lock);

	if (target == NULL)
		return 0;

	ret = i915_wait_request(target,
				I915_WAIT_INTERRUPTIBLE,
				MAX_SCHEDULE_TIMEOUT);
	i915_gem_request_put(target);

	return ret < 0 ? ret : 0;
}

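/*
 * Worked example for the 20ms window above (HZ value assumed): with HZ=250,
 * DRM_I915_THROTTLE_JIFFIES amounts to 5 jiffies, so a request emitted at
 * jiffies - 6 becomes the wait target while one emitted at jiffies - 4
 * terminates the scan and is not waited upon.
 */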
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.base;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	vma = i915_gem_obj_lookup_or_create_vma(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK &&
		    (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
			return ERR_PTR(-ENOSPC);

		if (flags & PIN_MAPPABLE) {
			u32 fence_size;

			fence_size = i915_gem_get_ggtt_size(dev_priv, vma->size,
							    i915_gem_object_get_tiling(obj));
			/* If the required space is larger than the available
			 * aperture, we will not be able to find a slot for the
			 * object and unbinding the object now will be in
			 * vain. Worse, doing so may cause us to ping-pong
			 * the object in and out of the Global GTT and
			 * waste a lot of cycles under the mutex.
			 */
			if (fence_size > dev_priv->ggtt.mappable_end)
				return ERR_PTR(-E2BIG);

			/* If NONBLOCK is set the caller is optimistically
			 * trying to cache the full object within the mappable
			 * aperture, and *must* have a fallback in place for
			 * situations where we cannot bind the object. We
			 * can be a little more lax here and use the fallback
			 * more often to avoid costly migrations of ourselves
			 * and other objects within the aperture.
			 *
			 * Half-the-aperture is used as a simple heuristic.
			 * More interesting would be to search for a free
			 * block prior to making the commitment to unbind.
			 * That caters for the self-harm case, and with a
			 * little more heuristics (e.g. NOFAULT, NOEVICT)
			 * we could try to minimise harm to others.
			 */
			if (flags & PIN_NONBLOCK &&
			    fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}

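/*
 * Illustrative sketch (not part of the driver): the optimistic-then-fallback
 * pattern used by callers of i915_gem_object_ggtt_pin() that would like a
 * mappable slot but can cope without one (compare the display-plane path
 * above). Names are taken from this file; error handling is elided.
 *
 *	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0,
 *				       PIN_MAPPABLE | PIN_NONBLOCK);
 *	if (IS_ERR(vma))
 *		vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, 0);
 */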
3741
static __always_inline unsigned int __busy_read_flag(unsigned int id)
{
	/* Note that we could alias engines in the execbuf API, but
	 * that would be very unwise as it prevents userspace from
	 * fine control over engine selection. Ahem.
	 *
	 * This should be something like EXEC_MAX_ENGINE instead of
	 * I915_NUM_ENGINES.
	 */
	BUILD_BUG_ON(I915_NUM_ENGINES > 16);
	return 0x10000 << id;
}

static __always_inline unsigned int __busy_write_id(unsigned int id)
{
	/* The uABI guarantees an active writer is also amongst the read
	 * engines. This would be true if we accessed the activity tracking
	 * under the lock, but as we perform the lookup of the object and
	 * its activity locklessly we can not guarantee that the last_write
	 * being active implies that we have set the same engine flag from
	 * last_read - hence we always set both read and write busy for
	 * last_write.
	 */
	return id | __busy_read_flag(id);
3765 3766
}

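/*
 * Worked example for the encoding above: for an engine with exec_id 1,
 * __busy_read_flag() yields 0x20000 and __busy_write_id() yields 0x20001,
 * i.e. the writer id in the low 16 bits with the matching read flag set in
 * the upper 16 bits.
 */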
3767
static __always_inline unsigned int
3768
__busy_set_if_active(const struct dma_fence *fence,
3769 3770
		     unsigned int (*flag)(unsigned int id))
{
3771
	struct drm_i915_gem_request *rq;
3772

3773 3774 3775 3776
	/* We have to check the current hw status of the fence as the uABI
	 * guarantees forward progress. We could rely on the idle worker
	 * to eventually flush us, but to minimise latency just ask the
	 * hardware.
3777
	 *
3778
	 * Note we only report on the status of native fences.
3779
	 */
	if (!dma_fence_is_i915(fence))
		return 0;

	/* opencode to_request() in order to avoid const warnings */
	rq = container_of(fence, struct drm_i915_gem_request, fence);
	if (i915_gem_request_completed(rq))
		return 0;

	return flag(rq->engine->exec_id);
3789 3790
}

3791
static __always_inline unsigned int
3792
busy_check_reader(const struct dma_fence *fence)
3793
{
3794
	return __busy_set_if_active(fence, __busy_read_flag);
3795 3796
}

3797
static __always_inline unsigned int
3798
busy_check_writer(const struct dma_fence *fence)
3799
{
3800 3801 3802 3803
	if (!fence)
		return 0;

	return __busy_set_if_active(fence, __busy_write_id);
3804 3805
}

3806 3807
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3808
		    struct drm_file *file)
3809 3810
{
	struct drm_i915_gem_busy *args = data;
3811
	struct drm_i915_gem_object *obj;
3812 3813
	struct reservation_object_list *list;
	unsigned int seq;
3814
	int err;
3815

3816
	err = -ENOENT;
3817 3818
	rcu_read_lock();
	obj = i915_gem_object_lookup_rcu(file, args->handle);
3819
	if (!obj)
3820
		goto out;
3821

	/* A discrepancy here is that we do not report the status of
	 * non-i915 fences, i.e. even though we may report the object as idle,
	 * a call to set-domain may still stall waiting for foreign rendering.
	 * This also means that wait-ioctl may report an object as busy,
	 * where busy-ioctl considers it idle.
	 *
	 * We trade the ability to warn of foreign fences to report on which
	 * i915 engines are active for the object.
	 *
	 * Alternatively, we can trade that extra information on read/write
	 * activity with
	 *	args->busy =
	 *		!reservation_object_test_signaled_rcu(obj->resv, true);
	 * to report the overall busyness. This is what the wait-ioctl does.
	 *
	 */
retry:
	seq = raw_read_seqcount(&obj->resv->seq);
3840

3841 3842
	/* Translate the exclusive fence to the READ *and* WRITE engine */
	args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
3843

3844 3845 3846 3847
	/* Translate shared fences to READ set of engines */
	list = rcu_dereference(obj->resv->fence);
	if (list) {
		unsigned int shared_count = list->shared_count, i;
3848

3849 3850 3851 3852 3853 3854
		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence =
				rcu_dereference(list->shared[i]);

			args->busy |= busy_check_reader(fence);
		}
3855
	}
3856

3857 3858 3859 3860
	if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
		goto retry;

	err = 0;
3861 3862 3863
out:
	rcu_read_unlock();
	return err;
3864 3865 3866 3867 3868 3869
}

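/*
 * Worked example for the busy-ioctl report above (exec_ids assumed): an
 * object with an active writer on exec_id 1 and a further reader on exec_id 2
 * reports args->busy = 0x20001 | 0x40000 = 0x60001: the writer id in the low
 * word, read flags for engines 1 and 2 in the high word.
 */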
int
i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
			struct drm_file *file_priv)
{
3870
	return i915_gem_ring_throttle(dev, file_priv);
3871 3872
}

3873 3874 3875 3876
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
3877
	struct drm_i915_private *dev_priv = to_i915(dev);
3878
	struct drm_i915_gem_madvise *args = data;
3879
	struct drm_i915_gem_object *obj;
3880
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

3890
	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;
3897

	if (obj->mm.pages &&
3899
	    i915_gem_object_is_tiled(obj) &&
3900
	    dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
3901 3902
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
3904 3905 3906
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
3907
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
3909 3910
			obj->mm.quirked = true;
		}
3911 3912
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;
3915

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
3918 3919
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
3921
	mutex_unlock(&obj->mm.lock);

3923
out:
3924
	i915_gem_object_put(obj);
3925
	return err;
3926 3927
}

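/*
 * Illustrative sketch (not part of the driver): userspace marking a buffer
 * purgeable and later reclaiming it via libdrm, checking whether the backing
 * store survived. Field names follow the args usage above;
 * reupload_contents() is a hypothetical caller-side helper.
 *
 *	struct drm_i915_gem_madvise madv = { .handle = handle };
 *
 *	madv.madv = I915_MADV_DONTNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	...
 *	madv.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 *	if (!madv.retained)
 *		reupload_contents(bo);
 */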
static void
frontbuffer_retire(struct i915_gem_active *active,
		   struct drm_i915_gem_request *request)
{
	struct drm_i915_gem_object *obj =
		container_of(active, typeof(*obj), frontbuffer_write);

	intel_fb_obj_flush(obj, true, ORIGIN_CS);
}

3938 3939
void i915_gem_object_init(struct drm_i915_gem_object *obj,
			  const struct drm_i915_gem_object_ops *ops)
3940
{
3941 3942
	mutex_init(&obj->mm.lock);

3943
	INIT_LIST_HEAD(&obj->global_link);
3944
	INIT_LIST_HEAD(&obj->userfault_link);
3945
	INIT_LIST_HEAD(&obj->obj_exec_link);
	INIT_LIST_HEAD(&obj->vma_list);
3947
	INIT_LIST_HEAD(&obj->batch_pool_link);
3948

3949 3950
	obj->ops = ops;

3951 3952 3953
	reservation_object_init(&obj->__builtin_resv);
	obj->resv = &obj->__builtin_resv;

3954
	obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
3955
	init_request_active(&obj->frontbuffer_write, frontbuffer_retire);

	obj->mm.madv = I915_MADV_WILLNEED;
	INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
	mutex_init(&obj->mm.get_page.lock);
3960

3961
	i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
3962 3963
}

3964
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
3965 3966
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
3967 3968 3969 3970
	.get_pages = i915_gem_object_get_pages_gtt,
	.put_pages = i915_gem_object_put_pages_gtt,
};

3971 3972 3973 3974 3975
/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
	(sizeof(x) > sizeof(T) && (x) >> (sizeof(T) * BITS_PER_BYTE))

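/*
 * Worked example for overflows_type() above: overflows_type(1ULL << 40, u32)
 * is true since bits above the low 32 remain set after the shift, while
 * overflows_type(1ULL << 20, u32) is false.
 */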
struct drm_i915_gem_object *
3976
i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
3977
{
3978
	struct drm_i915_gem_object *obj;
3979
	struct address_space *mapping;
	gfp_t mask;
3981
	int ret;
3982

	/* There is a prevalence of the assumption that we fit the object's
	 * page count inside a 32bit _signed_ variable. Let's document this and
	 * catch if we ever need to fix it. In the meantime, if you do spot
	 * such a local variable, please consider fixing!
	 */
	if (WARN_ON(size >> PAGE_SHIFT > INT_MAX))
		return ERR_PTR(-E2BIG);

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

3994
	obj = i915_gem_object_alloc(dev_priv);
3995
	if (obj == NULL)
3996
		return ERR_PTR(-ENOMEM);
3997

3998
	ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size);
3999 4000
	if (ret)
		goto fail;
4001

4002
	mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4003
	if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
4004 4005 4006 4007 4008
		/* 965gm cannot relocate objects above 4GiB. */
		mask &= ~__GFP_HIGHMEM;
		mask |= __GFP_DMA32;
	}

4009
	mapping = obj->base.filp->f_mapping;
4010
	mapping_set_gfp_mask(mapping, mask);
4011

4012
	i915_gem_object_init(obj, &i915_gem_object_ops);
4013

4014 4015
	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4016

4017
	if (HAS_LLC(dev_priv)) {
4018
		/* On some devices, we can have the GPU use the LLC (the CPU
		 * cache) for about a 10% performance improvement
		 * compared to uncached.  Graphics requests other than
		 * display scanout are coherent with the CPU in
		 * accessing this cache.  This means in this mode we
		 * don't need to clflush on the CPU side, and on the
		 * GPU side we only need to flush internal caches to
		 * get data visible to the CPU.
		 *
		 * However, we maintain the display planes as UC, and so
		 * need to rebind when first used as such.
		 */
		obj->cache_level = I915_CACHE_LLC;
	} else
		obj->cache_level = I915_CACHE_NONE;

4034 4035
	trace_i915_gem_object_create(obj);

4036
	return obj;
4037 4038 4039 4040

fail:
	i915_gem_object_free(obj);
	return ERR_PTR(ret);
4041 4042
}

4043 4044 4045 4046 4047 4048 4049 4050
static bool discard_backing_storage(struct drm_i915_gem_object *obj)
{
	/* If we are the last user of the backing storage (be it shmemfs
	 * pages or stolen etc), we know that the pages are going to be
	 * immediately released. In this case, we can then skip copying
	 * back the contents from the GPU.
	 */

	if (obj->mm.madv != I915_MADV_WILLNEED)
		return false;

	if (obj->base.filp == NULL)
		return true;

	/* At first glance, this looks racy, but then again so would be
	 * userspace racing mmap against close. However, the first external
	 * reference to the filp can only be obtained through the
	 * i915_gem_mmap_ioctl() which safeguards us against the user
	 * acquiring such a reference whilst we are in the middle of
	 * freeing the object.
	 */
	return atomic_long_read(&obj->base.filp->f_count) == 1;
}

4067 4068
static void __i915_gem_free_objects(struct drm_i915_private *i915,
				    struct llist_node *freed)
4069
{
4070
	struct drm_i915_gem_object *obj, *on;
4071

	mutex_lock(&i915->drm.struct_mutex);
	intel_runtime_pm_get(i915);
	llist_for_each_entry(obj, freed, freed) {
		struct i915_vma *vma, *vn;

		trace_i915_gem_object_destroy(obj);

		GEM_BUG_ON(i915_gem_object_is_active(obj));
		list_for_each_entry_safe(vma, vn,
					 &obj->vma_list, obj_link) {
			GEM_BUG_ON(!i915_vma_is_ggtt(vma));
			GEM_BUG_ON(i915_vma_is_active(vma));
			vma->flags &= ~I915_VMA_PIN_MASK;
			i915_vma_close(vma);
		}
4087 4088
		GEM_BUG_ON(!list_empty(&obj->vma_list));
		GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
4089

4090
		list_del(&obj->global_link);
4091 4092 4093 4094 4095 4096 4097 4098 4099 4100
	}
	intel_runtime_pm_put(i915);
	mutex_unlock(&i915->drm.struct_mutex);

	llist_for_each_entry_safe(obj, on, freed, freed) {
		GEM_BUG_ON(obj->bind_count);
		GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));

		if (obj->ops->release)
			obj->ops->release(obj);
4101

4102 4103
		if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
			atomic_set(&obj->mm.pages_pin_count, 0);
4104
		__i915_gem_object_put_pages(obj, I915_MM_NORMAL);
4105 4106 4107 4108 4109
		GEM_BUG_ON(obj->mm.pages);

		if (obj->base.import_attach)
			drm_prime_gem_destroy(&obj->base, NULL);

4110
		reservation_object_fini(&obj->__builtin_resv);
		drm_gem_object_release(&obj->base);
		i915_gem_info_remove_obj(i915, obj->base.size);

		kfree(obj->bit_17);
		i915_gem_object_free(obj);
	}
}

static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
{
	struct llist_node *freed;

	freed = llist_del_all(&i915->mm.free_list);
	if (unlikely(freed))
		__i915_gem_free_objects(i915, freed);
}

static void __i915_gem_free_work(struct work_struct *work)
{
	struct drm_i915_private *i915 =
		container_of(work, struct drm_i915_private, mm.free_work);
	struct llist_node *freed;
4133

4134 4135 4136 4137 4138 4139 4140
	/* All file-owned VMA should have been released by this point through
	 * i915_gem_close_object(), or earlier by i915_gem_context_close().
	 * However, the object may also be bound into the global GTT (e.g.
	 * older GPUs without per-process support, or for direct access through
	 * the GTT either for the user or for scanout). Those VMA still need to
	 * be unbound now.
	 */
4141

4142 4143 4144
	while ((freed = llist_del_all(&i915->mm.free_list)))
		__i915_gem_free_objects(i915, freed);
}
4145

static void __i915_gem_free_object_rcu(struct rcu_head *head)
{
	struct drm_i915_gem_object *obj =
		container_of(head, typeof(*obj), rcu);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);

	/* We can't simply use call_rcu() from i915_gem_free_object()
	 * as we need to block whilst unbinding, and the call_rcu
	 * task may be called from softirq context. So we take a
	 * detour through a worker.
	 */
	if (llist_add(&obj->freed, &i915->mm.free_list))
		schedule_work(&i915->mm.free_work);
}
4160

4161 4162 4163
void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
	struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);

4165 4166 4167
	if (obj->mm.quirked)
		__i915_gem_object_unpin_pages(obj);

4168
	if (discard_backing_storage(obj))
		obj->mm.madv = I915_MADV_DONTNEED;
4170

4171 4172 4173 4174 4175 4176
	/* Before we free the object, make sure any pure RCU-only
	 * read-side critical sections are complete, e.g.
	 * i915_gem_busy_ioctl(). For the corresponding synchronized
	 * lookup see i915_gem_object_lookup_rcu().
	 */
	call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
4177 4178
}

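/*
 * Summary of the free path implemented above: i915_gem_free_object() defers
 * through call_rcu(); after the grace period __i915_gem_free_object_rcu()
 * pushes the object onto i915->mm.free_list and schedules
 * __i915_gem_free_work(), which unbinds the remaining GGTT VMA and releases
 * the backing storage under struct_mutex.
 */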
4179 4180 4181 4182 4183 4184 4185 4186 4187 4188 4189
void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
{
	lockdep_assert_held(&obj->base.dev->struct_mutex);

	GEM_BUG_ON(i915_gem_object_has_active_reference(obj));
	if (i915_gem_object_is_active(obj))
		i915_gem_object_set_active_reference(obj);
	else
		i915_gem_object_put(obj);
}

4190 4191 4192 4193 4194 4195 4196 4197 4198
static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id)
		GEM_BUG_ON(engine->last_context != dev_priv->kernel_context);
}

4199
int i915_gem_suspend(struct drm_i915_private *dev_priv)
4200
{
4201
	struct drm_device *dev = &dev_priv->drm;
4202
	int ret;
4203

4204 4205
	intel_suspend_gt_powersave(dev_priv);

4206
	mutex_lock(&dev->struct_mutex);

	/* We have to flush all the executing contexts to main memory so
	 * that they can saved in the hibernation image. To ensure the last
	 * context image is coherent, we have to switch away from it. That
	 * leaves the dev_priv->kernel_context still active when
	 * we actually suspend, and its image in memory may not match the GPU
	 * state. Fortunately, the kernel_context is disposable and we do
	 * not rely on its state.
	 */
	ret = i915_gem_switch_to_kernel_context(dev_priv);
	if (ret)
		goto err;

4220 4221 4222
	ret = i915_gem_wait_for_idle(dev_priv,
				     I915_WAIT_INTERRUPTIBLE |
				     I915_WAIT_LOCKED);
4223
	if (ret)
4224
		goto err;
4225

4226
	i915_gem_retire_requests(dev_priv);
4227
	GEM_BUG_ON(dev_priv->gt.active_requests);
4228

4229
	assert_kernel_context_is_current(dev_priv);
4230
	i915_gem_context_lost(dev_priv);
4231 4232
	mutex_unlock(&dev->struct_mutex);

4233
	cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4234 4235
	cancel_delayed_work_sync(&dev_priv->gt.retire_work);
	flush_delayed_work(&dev_priv->gt.idle_work);
4236
	flush_work(&dev_priv->mm.free_work);
4237

4238 4239 4240
	/* Assert that we successfully flushed all the work and
	 * reset the GPU back to its idle, low power state.
	 */
4241
	WARN_ON(dev_priv->gt.awake);
4242
	WARN_ON(!intel_execlists_idle(dev_priv));
4243

	/*
	 * Neither the BIOS, ourselves or any other kernel
	 * expects the system to be in execlists mode on startup,
	 * so we need to reset the GPU back to legacy mode. And the only
	 * known way to disable logical contexts is through a GPU reset.
	 *
	 * So in order to leave the system in a known default configuration,
	 * always reset the GPU upon unload and suspend. Afterwards we then
	 * clean up the GEM state tracking, flushing off the requests and
	 * leaving the system in a known idle state.
	 *
	 * Note that it is of the utmost importance that the GPU is idle and
	 * all stray writes are flushed *before* we dismantle the backing
	 * storage for the pinned objects.
	 *
	 * However, since we are uncertain that resetting the GPU on older
	 * machines is a good idea, we don't - just in case it leaves the
	 * machine in an unusable condition.
	 */
4263
	if (HAS_HW_CONTEXTS(dev_priv)) {
4264 4265 4266 4267
		int reset = intel_gpu_reset(dev_priv, ALL_ENGINES);
		WARN_ON(reset && reset != -ENODEV);
	}

4268
	return 0;
4269 4270 4271 4272

err:
	mutex_unlock(&dev->struct_mutex);
	return ret;
4273 4274
}

4275
void i915_gem_resume(struct drm_i915_private *dev_priv)
4276
{
4277
	struct drm_device *dev = &dev_priv->drm;
4278

4279 4280
	WARN_ON(dev_priv->gt.awake);

4281
	mutex_lock(&dev->struct_mutex);
4282
	i915_gem_restore_gtt_mappings(dev_priv);
4283 4284 4285 4286 4287

	/* As we didn't flush the kernel context before suspend, we cannot
	 * guarantee that the context image is complete. So let's just reset
	 * it and start again.
	 */
4288
	dev_priv->gt.resume(dev_priv);
4289 4290 4291 4292

	mutex_unlock(&dev->struct_mutex);
}

4293
void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
4294
{
4295
	if (INTEL_GEN(dev_priv) < 5 ||
4296 4297 4298 4299 4300 4301
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

4302
	if (IS_GEN5(dev_priv))
4303 4304
		return;

4305
	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4306
	if (IS_GEN6(dev_priv))
4307
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4308
	else if (IS_GEN7(dev_priv))
4309
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4310
	else if (IS_GEN8(dev_priv))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4312 4313
	else
		BUG();
4314
}

4316
static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
4317 4318 4319 4320 4321 4322 4323
{
	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

4324
static void init_unused_rings(struct drm_i915_private *dev_priv)
4325
{
4326 4327 4328 4329 4330 4331 4332 4333 4334 4335 4336 4337
	if (IS_I830(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
		init_unused_ring(dev_priv, SRB2_BASE);
		init_unused_ring(dev_priv, SRB3_BASE);
	} else if (IS_GEN2(dev_priv)) {
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
	} else if (IS_GEN3(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, PRB2_BASE);
4338 4339 4340
	}
}

4341
int
4342
i915_gem_init_hw(struct drm_i915_private *dev_priv)
4343
{
4344
	struct intel_engine_cs *engine;
4345
	enum intel_engine_id id;
	int ret;
4347

4348 4349
	dev_priv->gt.last_init_time = ktime_get();

4350 4351 4352
	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

4353
	if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
4354
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4355

4356
	if (IS_HASWELL(dev_priv))
4357
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
4358
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4359

4360
	if (HAS_PCH_NOP(dev_priv)) {
4361
		if (IS_IVYBRIDGE(dev_priv)) {
4362 4363 4364
			u32 temp = I915_READ(GEN7_MSG_CTL);
			temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
			I915_WRITE(GEN7_MSG_CTL, temp);
4365
		} else if (INTEL_GEN(dev_priv) >= 7) {
4366 4367 4368 4369
			u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
			temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
			I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
		}
4370 4371
	}

4372
	i915_gem_init_swizzling(dev_priv);
4373

4374 4375 4376 4377 4378 4379
	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
4380
	init_unused_rings(dev_priv);
4381

4382
	BUG_ON(!dev_priv->kernel_context);
4383

4384
	ret = i915_ppgtt_init_hw(dev_priv);
4385 4386 4387 4388 4389 4390
	if (ret) {
		DRM_ERROR("PPGTT enable HW failed %d\n", ret);
		goto out;
	}

	/* Need to do basic initialisation of all rings first: */
4391
	for_each_engine(engine, dev_priv, id) {
4392
		ret = engine->init_hw(engine);
		if (ret)
4394
			goto out;
	}
4396

4397
	intel_mocs_init_l3cc_table(dev_priv);
4398

4399
	/* We can't enable contexts until all firmware is loaded */
4400
	ret = intel_guc_setup(dev_priv);
4401 4402
	if (ret)
		goto out;
4403

4404 4405
out:
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4406
	return ret;
4407 4408
}

bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
{
	if (INTEL_INFO(dev_priv)->gen < 6)
		return false;

	/* TODO: make semaphores and Execlists play nicely together */
	if (i915.enable_execlists)
		return false;

	if (value >= 0)
		return value;

#ifdef CONFIG_INTEL_IOMMU
	/* Enable semaphores on SNB when IO remapping is off */
	if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

4430
int i915_gem_init(struct drm_i915_private *dev_priv)
4431 4432 4433
{
	int ret;

4434
	mutex_lock(&dev_priv->drm.struct_mutex);
4435

4436
	if (!i915.enable_execlists) {
4437
		dev_priv->gt.resume = intel_legacy_submission_resume;
4438
		dev_priv->gt.cleanup_engine = intel_engine_cleanup;
4439
	} else {
4440
		dev_priv->gt.resume = intel_lr_context_resume;
4441
		dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
4442 4443
	}

4444 4445 4446 4447 4448 4449 4450 4451
	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

4452
	i915_gem_init_userptr(dev_priv);
4453 4454 4455 4456

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret)
		goto out_unlock;
4457

4458
	ret = i915_gem_context_init(dev_priv);
4459 4460
	if (ret)
		goto out_unlock;
4461

4462
	ret = intel_engines_init(dev_priv);
	if (ret)
4464
		goto out_unlock;
4465

4466
	ret = i915_gem_init_hw(dev_priv);
4467
	if (ret == -EIO) {
4468
		/* Allow engine initialisation to fail by marking the GPU as
4469 4470 4471 4472
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
4473
		i915_gem_set_wedged(dev_priv);
4474
		ret = 0;
4475
	}
4476 4477

out_unlock:
4478
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
4479
	mutex_unlock(&dev_priv->drm.struct_mutex);
4480

4481
	return ret;
4482 4483
}

4484
void
4485
i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
4486
{
4487
	struct intel_engine_cs *engine;
4488
	enum intel_engine_id id;
4489

4490
	for_each_engine(engine, dev_priv, id)
4491
		dev_priv->gt.cleanup_engine(engine);
4492 4493
}

4494 4495 4496
void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
4497
	int i;
4498 4499 4500 4501

	if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
4502 4503 4504
	else if (INTEL_INFO(dev_priv)->gen >= 4 ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
4505 4506 4507 4508
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

4509
	if (intel_vgpu_active(dev_priv))
4510 4511 4512 4513
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
4514 4515 4516 4517 4518 4519 4520
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

		fence->i915 = dev_priv;
		fence->id = i;
		list_add_tail(&fence->link, &dev_priv->mm.fence_list);
	}
4521
	i915_gem_restore_fences(dev_priv);
4522

4523
	i915_gem_detect_bit_6_swizzle(dev_priv);
4524 4525
}

4526
int
4527
i915_gem_load_init(struct drm_i915_private *dev_priv)
4528
{
4529
	int err = -ENOMEM;
4530

4531 4532
	dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->objects)
4533 4534
		goto err_out;

4535 4536
	dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
	if (!dev_priv->vmas)
4537 4538
		goto err_objects;

4539 4540 4541 4542 4543
	dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
					SLAB_HWCACHE_ALIGN |
					SLAB_RECLAIM_ACCOUNT |
					SLAB_DESTROY_BY_RCU);
	if (!dev_priv->requests)
4544 4545
		goto err_vmas;

4546 4547 4548 4549 4550 4551
	dev_priv->dependencies = KMEM_CACHE(i915_dependency,
					    SLAB_HWCACHE_ALIGN |
					    SLAB_RECLAIM_ACCOUNT);
	if (!dev_priv->dependencies)
		goto err_requests;

4552 4553
	mutex_lock(&dev_priv->drm.struct_mutex);
	INIT_LIST_HEAD(&dev_priv->gt.timelines);
4554
	err = i915_gem_timeline_init__global(dev_priv);
4555 4556
	mutex_unlock(&dev_priv->drm.struct_mutex);
	if (err)
4557
		goto err_dependencies;
4558

4559
	INIT_LIST_HEAD(&dev_priv->context_list);
4560 4561
	INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
	init_llist_head(&dev_priv->mm.free_list);
	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
4564
	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
4565
	INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
4566
	INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
4567
			  i915_gem_retire_work_handler);
4568
	INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
4569
			  i915_gem_idle_work_handler);
4570
	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
4571
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
4572

4573 4574
	dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;

4575
	init_waitqueue_head(&dev_priv->pending_flip_queue);
4576

4577 4578
	dev_priv->mm.interruptible = true;

4579 4580
	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

4581
	spin_lock_init(&dev_priv->fb_tracking.lock);
4582 4583 4584

	return 0;

4585 4586
err_dependencies:
	kmem_cache_destroy(dev_priv->dependencies);
4587 4588 4589 4590 4591 4592 4593 4594
err_requests:
	kmem_cache_destroy(dev_priv->requests);
err_vmas:
	kmem_cache_destroy(dev_priv->vmas);
err_objects:
	kmem_cache_destroy(dev_priv->objects);
err_out:
	return err;
4595
}
4596

4597
void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
4598
{
4599 4600
	WARN_ON(!llist_empty(&dev_priv->mm.free_list));

4601 4602 4603 4604 4605
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
	WARN_ON(!list_empty(&dev_priv->gt.timelines));
	mutex_unlock(&dev_priv->drm.struct_mutex);

4606
	kmem_cache_destroy(dev_priv->dependencies);
4607 4608 4609
	kmem_cache_destroy(dev_priv->requests);
	kmem_cache_destroy(dev_priv->vmas);
	kmem_cache_destroy(dev_priv->objects);
4610 4611 4612

	/* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
	rcu_barrier();
4613 4614
}

4615 4616 4617 4618 4619 4620 4621 4622 4623 4624 4625 4626 4627
int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_shrink_all(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_runtime_pm_put(dev_priv);

	return 0;
}

4628 4629 4630
int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj;
4631 4632 4633 4634 4635
	struct list_head *phases[] = {
		&dev_priv->mm.unbound_list,
		&dev_priv->mm.bound_list,
		NULL
	}, **p;
4636 4637 4638 4639 4640 4641 4642 4643 4644 4645

	/* Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
4646 4647 4648
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well.
4649 4650
	 */

4651 4652
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_gem_shrink(dev_priv, -1UL, I915_SHRINK_UNBOUND);
4653

4654
	for (p = phases; *p; p++) {
4655
		list_for_each_entry(obj, *p, global_link) {
4656 4657 4658
			obj->base.read_domains = I915_GEM_DOMAIN_CPU;
			obj->base.write_domain = I915_GEM_DOMAIN_CPU;
		}
4659
	}
4660
	mutex_unlock(&dev_priv->drm.struct_mutex);
4661 4662 4663 4664

	return 0;
}

4665
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
4666
{
4667
	struct drm_i915_file_private *file_priv = file->driver_priv;
4668
	struct drm_i915_gem_request *request;
4669 4670 4671 4672 4673

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
4674
	spin_lock(&file_priv->mm.lock);
4675
	list_for_each_entry(request, &file_priv->mm.request_list, client_list)
4676
		request->file_priv = NULL;
4677
	spin_unlock(&file_priv->mm.lock);
4678

4679
	if (!list_empty(&file_priv->rps.link)) {
4680
		spin_lock(&to_i915(dev)->rps.client_lock);
4681
		list_del(&file_priv->rps.link);
4682
		spin_unlock(&to_i915(dev)->rps.client_lock);
4683
	}
4684 4685 4686 4687 4688
}

int i915_gem_open(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
4689
	int ret;
4690

4691
	DRM_DEBUG("\n");
4692 4693 4694 4695 4696 4697

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
4698
	file_priv->dev_priv = to_i915(dev);
4699
	file_priv->file = file;
4700
	INIT_LIST_HEAD(&file_priv->rps.link);
4701 4702 4703 4704

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

4705
	file_priv->bsd_engine = -1;
4706

4707 4708 4709
	ret = i915_gem_context_open(dev, file);
	if (ret)
		kfree(file_priv);
4710

4711
	return ret;
4712 4713
}

4714 4715
/**
 * i915_gem_track_fb - update frontbuffer tracking
4716 4717 4718
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
4719 4720 4721 4722
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
4723 4724 4725 4726
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
4727 4728 4729 4730 4731 4732 4733 4734 4735
	/* Control of individual bits within the mask are guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     sizeof(atomic_t) * BITS_PER_BYTE);

4736
	if (old) {
4737 4738
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
4739 4740 4741
	}

	if (new) {
4742 4743
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
4744 4745 4746
	}
}

4747 4748
/* Allocate a new GEM object and fill it with the supplied data */
struct drm_i915_gem_object *
4749
i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
4750 4751 4752 4753 4754 4755 4756
			         const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	struct sg_table *sg;
	size_t bytes;
	int ret;

4757
	obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
4758
	if (IS_ERR(obj))
4759 4760 4761 4762 4763 4764
		return obj;

	ret = i915_gem_object_set_to_cpu_domain(obj, true);
	if (ret)
		goto fail;

	ret = i915_gem_object_pin_pages(obj);
4766 4767 4768
	if (ret)
		goto fail;

	sg = obj->mm.pages;
4770
	bytes = sg_copy_from_buffer(sg->sgl, sg->nents, (void *)data, size);
	obj->mm.dirty = true; /* Backing store is now out of date */
4772 4773 4774 4775 4776 4777 4778 4779 4780 4781 4782
	i915_gem_object_unpin_pages(obj);

	if (WARN_ON(bytes != size)) {
		DRM_ERROR("Incomplete copy, wrote %zu of %zu", bytes, size);
		ret = -EFAULT;
		goto fail;
	}

	return obj;

fail:
4783
	i915_gem_object_put(obj);
4784 4785
	return ERR_PTR(ret);
}
4786 4787 4788 4789 4790 4791

struct scatterlist *
i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
		       unsigned int n,
		       unsigned int *offset)
{
	struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
4793 4794 4795 4796 4797
	struct scatterlist *sg;
	unsigned int idx, count;

	might_sleep();
	GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));

	/* As we iterate forward through the sg, we record each entry in a
	 * radixtree for quick repeated (backwards) lookups. If we have seen
	 * this index previously, we will have an entry for it.
	 *
	 * Initial lookup is O(N), but this is amortized to O(1) for
	 * sequential page access (where each new request is consecutive
	 * to the previous one). Repeated lookups are O(lg(obj->base.size)),
	 * i.e. O(1) with a large constant!
	 */
	if (n < READ_ONCE(iter->sg_idx))
		goto lookup;

	mutex_lock(&iter->lock);

	/* We prefer to reuse the last sg so that repeated lookup of this
	 * (or the subsequent) sg are fast - comparing against the last
	 * sg is faster than going through the radixtree.
	 */

	sg = iter->sg_pos;
	idx = iter->sg_idx;
	count = __sg_page_count(sg);

	while (idx + count <= n) {
		unsigned long exception, i;
		int ret;

		/* If we cannot allocate and insert this entry, or the
		 * individual pages from this range, cancel updating the
		 * sg_idx so that on this lookup we are forced to linearly
		 * scan onwards, but on future lookups we will try the
		 * insertion again (in which case we need to be careful of
		 * the error return reporting that we have already inserted
		 * this index).
		 */
		ret = radix_tree_insert(&iter->radix, idx, sg);
		if (ret && ret != -EEXIST)
			goto scan;

		exception =
			RADIX_TREE_EXCEPTIONAL_ENTRY |
			idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
		for (i = 1; i < count; i++) {
			ret = radix_tree_insert(&iter->radix, idx + i,
						(void *)exception);
			if (ret && ret != -EEXIST)
				goto scan;
		}

		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

scan:
	iter->sg_pos = sg;
	iter->sg_idx = idx;

	mutex_unlock(&iter->lock);

	if (unlikely(n < idx)) /* insertion completed by another thread */
		goto lookup;

	/* In case we failed to insert the entry into the radixtree, we need
	 * to look beyond the current sg.
	 */
	while (idx + count <= n) {
		idx += count;
		sg = ____sg_next(sg);
		count = __sg_page_count(sg);
	}

	*offset = n - idx;
	return sg;

lookup:
	rcu_read_lock();

	sg = radix_tree_lookup(&iter->radix, n);
	GEM_BUG_ON(!sg);

	/* If this index is in the middle of multi-page sg entry,
	 * the radixtree will contain an exceptional entry that points
	 * to the start of that range. We will return the pointer to
	 * the base page and the offset of this page within the
	 * sg entry's range.
	 */
	*offset = 0;
	if (unlikely(radix_tree_exception(sg))) {
		unsigned long base =
			(unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;

		sg = radix_tree_lookup(&iter->radix, base);
		GEM_BUG_ON(!sg);

		*offset = n - base;
	}

	rcu_read_unlock();

	return sg;
}

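/*
 * Worked example for the radixtree encoding above (indices assumed): if one
 * sg entry covers pages 8-11, the sg pointer is stored at index 8 and
 * exceptional entries encoding base 8 at indices 9-11; a lookup of page 10
 * then returns that sg with *offset = 10 - 8 = 2.
 */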
struct page *
i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
{
	struct scatterlist *sg;
	unsigned int offset;

	GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return nth_page(sg_page(sg), offset);
}

/* Like i915_gem_object_get_page(), but mark the returned page dirty */
struct page *
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
			       unsigned int n)
{
	struct page *page;

	page = i915_gem_object_get_page(obj, n);
	if (!obj->mm.dirty)
		set_page_dirty(page);

	return page;
}

dma_addr_t
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
				unsigned long n)
{
	struct scatterlist *sg;
	unsigned int offset;

	sg = i915_gem_object_get_sg(obj, n, &offset);
	return sg_dma_address(sg) + (offset << PAGE_SHIFT);
}