/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_display.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"
#include "intel_pm.h"

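/*
 * Reserve a temporary node in the CPU-mappable range of the GGTT so that a
 * single page can be accessed through the aperture; used by the pread/pwrite
 * slow paths below when the object itself cannot be pinned in mappable space.
 */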
static int
insert_mappable_node(struct i915_ggtt *ggtt,
                     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

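/**
 * i915_gem_get_aperture_ioctl - report total and available GGTT space
 * @dev: drm device pointer
 * @data: ioctl data blob (struct drm_i915_gem_get_aperture)
 * @file: drm file pointer
 *
 * Accounts the reserved portion of the GGTT plus every currently pinned vma
 * as unavailable, giving userspace a rough estimate of the remaining
 * aperture space.
 */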
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	mutex_lock(&ggtt->vm.mutex);

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

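/*
 * Try to release every vma currently bound for this object. The walk stops
 * at the first vma that cannot be unbound and returns that error; the caller
 * must hold struct_mutex.
 */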
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		spin_unlock(&obj->vma.lock);

		ret = i915_vma_unbind(vma);

		spin_lock(&obj->vma.lock);
	}
	list_splice(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	return ret;
}

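/*
 * Pwrite path for objects with a phys_handle backing store: copy the user
 * data straight into the CPU mapping, then clflush and flush the chipset so
 * the write is visible to the GPU.
 */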
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}

static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		u64 *size_p,
		u32 *handle_p)
{
	struct drm_i915_gem_object *obj;
	u32 handle;
	u64 size;
	int ret;

	size = round_up(*size_p, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create_shmem(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	*size_p = size;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       &args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       &args->size, &args->handle);
}

void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
		return;

	i915_gem_chipset_flush(dev_priv);

	with_intel_runtime_pm(dev_priv, wakeref) {
		spin_lock_irq(&dev_priv->uncore.lock);

		POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));

		spin_unlock_irq(&dev_priv->uncore.lock);
	}
}

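/* Per-page copy function for the shmem pread fastpath.
 * Flushes invalid cachelines before reading from the page if
 * needs_clflush is set.
 */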
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	struct dma_fence *fence;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unlock_fence(obj, fence);
	return ret;
}

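/*
 * Copy from the GGTT aperture to userspace. A non-faulting atomic
 * write-combined mapping is tried first; if that copy fails we fall back to
 * a regular mapping that may fault.
 */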
static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

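/*
 * Slow pread path through the GGTT aperture: pin the object into the
 * mappable aperture (or map it page by page through a temporary node) and
 * read via gtt_user_read(). Used as a fallback when the shmem path cannot
 * service the read.
 */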
static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	wakeref = intel_runtime_pm_get(i915);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(i915, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source.  */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(i915);
		if (!wakeref) {
			ret = -EFAULT;
			goto out_unlock;
		}
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(i915);
	}

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!node.allocated);
	}
	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry with the
		 * path that handles page faults.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_fb_obj_flush(obj, ORIGIN_CPU);

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(i915, wakeref);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	struct dma_fence *fence;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire write.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	i915_gem_object_unlock_fence(obj, fence);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
{
	struct drm_i915_gem_object *obj, *on;
	int i;
	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */
	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.userfault_list, userfault_link)
		__i915_gem_object_release_mmap(obj);

	/* The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];

		/* Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

static int wait_for_engines(struct drm_i915_private *i915)
{
	if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
		dev_err(i915->drm.dev,
			"Failed to idle engines, declaring wedged!\n");
		GEM_TRACE_DUMP();
		i915_gem_set_wedged(i915);
		return -EIO;
	}

	return 0;
}

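/*
 * Wait upon the last request on every active timeline, dropping the timeline
 * mutex while waiting so that new activity is not blocked. With
 * I915_WAIT_FOR_IDLE_BOOST the GPU is allowed to boost to maximum clocks to
 * race to idle.
 */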
static long
wait_for_timelines(struct drm_i915_private *i915,
		   unsigned int flags, long timeout)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;
	struct i915_timeline *tl;

	mutex_lock(&gt->mutex);
	list_for_each_entry(tl, &gt->active_list, link) {
		struct i915_request *rq;

		rq = i915_active_request_get_unlocked(&tl->last_request);
		if (!rq)
			continue;

		mutex_unlock(&gt->mutex);

		/*
		 * "Race-to-idle".
		 *
		 * Switching to the kernel context is often used a synchronous
		 * step prior to idling, e.g. in suspend for flushing all
		 * current operations to memory before sleeping. These we
		 * want to complete as quickly as possible to avoid prolonged
		 * stalls, so allow the gpu to boost to maximum clocks.
		 */
		if (flags & I915_WAIT_FOR_IDLE_BOOST)
			gen6_rps_boost(rq);

		timeout = i915_request_wait(rq, flags, timeout);
		i915_request_put(rq);
		if (timeout < 0)
			return timeout;

		/* restart after reacquiring the lock */
		mutex_lock(&gt->mutex);
		tl = list_entry(&gt->active_list, typeof(*tl), link);
	}
	mutex_unlock(&gt->mutex);

	return timeout;
}

int i915_gem_wait_for_idle(struct drm_i915_private *i915,
			   unsigned int flags, long timeout)
{
	GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
		  flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
		  timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
		  yesno(i915->gt.awake));

	/* If the device is asleep, we have no requests outstanding */
	if (!READ_ONCE(i915->gt.awake))
		return 0;

	timeout = wait_for_timelines(i915, flags, timeout);
	if (timeout < 0)
		return timeout;

	if (flags & I915_WAIT_LOCKED) {
		int err;

		lockdep_assert_held(&i915->drm.struct_mutex);

		err = wait_for_engines(i915);
		if (err)
			return err;

		i915_retire_requests(i915);
	}

	return 0;
}

struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.vm;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
		/* If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > dev_priv->ggtt.mappable_end)
			return ERR_PTR(-E2BIG);

		/* If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would to do search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			if (flags & PIN_MAPPABLE &&
			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}

int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;

	if (i915_gem_object_has_pages(obj)) {
		struct list_head *list;

		if (i915_gem_object_is_shrinkable(obj)) {
			spin_lock(&i915->mm.obj_lock);
			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else if (obj->bind_count)
				list = &i915->mm.bound_list;
			else
				list = &i915->mm.unbound_list;
			list_move_tail(&obj->mm.link, list);
			spin_unlock(&i915->mm.obj_lock);
		}
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	mutex_unlock(&obj->mm.lock);

out:
	i915_gem_object_put(obj);
	return err;
}

void i915_gem_sanitize(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	GEM_TRACE("\n");

	wakeref = intel_runtime_pm_get(i915);
	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
	 * back to defaults, recovering from whatever wedged state we left it
	 * in and so worth trying to use the device once more.
	 */
	if (i915_terminally_wedged(i915))
		i915_gem_unset_wedged(i915);

	/*
	 * If we inherit context state from the BIOS or earlier occupants
	 * of the GPU, the GPU may be in an inconsistent state when we
	 * try to take over. The only way to remove the earlier state
	 * is by resetting. However, resetting on earlier gen is tricky as
	 * it may impact the display and we are uncertain about the stability
	 * of the reset, so this could be applied to even earlier gen.
	 */
	intel_gt_sanitize(i915, false);

	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	intel_runtime_pm_put(i915, wakeref);

	mutex_lock(&i915->drm.struct_mutex);
	i915_gem_contexts_lost(i915);
	mutex_unlock(&i915->drm.struct_mutex);
}

void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 5 ||
	    dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
		return;

	I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
				 DISP_TILE_SURFACE_SWIZZLING);

	if (IS_GEN(dev_priv, 5))
		return;

	I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
	if (IS_GEN(dev_priv, 6))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
	else if (IS_GEN(dev_priv, 7))
		I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
	else if (IS_GEN(dev_priv, 8))
		I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
	else
		BUG();
}
static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
{
	I915_WRITE(RING_CTL(base), 0);
	I915_WRITE(RING_HEAD(base), 0);
	I915_WRITE(RING_TAIL(base), 0);
	I915_WRITE(RING_START(base), 0);
}

static void init_unused_rings(struct drm_i915_private *dev_priv)
{
	if (IS_I830(dev_priv)) {
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
		init_unused_ring(dev_priv, SRB2_BASE);
		init_unused_ring(dev_priv, SRB3_BASE);
1241
	} else if (IS_GEN(dev_priv, 2)) {
1242 1243
		init_unused_ring(dev_priv, SRB0_BASE);
		init_unused_ring(dev_priv, SRB1_BASE);
1244
	} else if (IS_GEN(dev_priv, 3)) {
1245 1246
		init_unused_ring(dev_priv, PRB1_BASE);
		init_unused_ring(dev_priv, PRB2_BASE);
1247 1248 1249
	}
}

1250 1251
int i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
C
	int ret;

	dev_priv->gt.last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev_priv))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(dev_priv);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(dev_priv, "init");

	i915_gem_init_swizzling(dev_priv);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Make sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(dev_priv);

	BUG_ON(!dev_priv->kernel_context);
	ret = i915_terminally_wedged(dev_priv);
	if (ret)
		goto out;

	ret = i915_ppgtt_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	ret = intel_wopcm_init_hw(&dev_priv->wopcm);
	if (ret) {
		DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init_l3cc_table(dev_priv);

	/* Only when the HW is re-initialised, can we replay the requests */
	ret = intel_engines_resume(dev_priv);
	if (ret)
		goto cleanup_uc;

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_engines_set_scheduler_caps(dev_priv);
	return 0;

cleanup_uc:
	intel_uc_fini_hw(dev_priv);
out:
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	return ret;
}

static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct i915_gem_engines *e;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its defaults values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	ctx = i915_gem_context_create_kernel(i915, 0);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	e = i915_gem_context_lock_engines(ctx);

	for_each_engine(engine, i915, id) {
		struct intel_context *ce = e->engines[id];
		struct i915_request *rq;

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_active;
		}

		err = 0;
		if (rq->engine->init_context)
			err = rq->engine->init_context(rq);

		i915_request_add(rq);
		if (err)
			goto err_active;
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (!i915_gem_load_power_context(i915)) {
		err = -EIO;
		goto err_active;
	}

	for_each_engine(engine, i915, id) {
		struct intel_context *ce = e->engines[id];
		struct i915_vma *state = ce->state;
		void *vaddr;

		if (!state)
			continue;

		GEM_BUG_ON(intel_context_is_pinned(ce));

		/*
		 * As we will hold a reference to the logical state, it will
		 * not be torn down with the context, and importantly the
		 * object will hold onto its vma (making it possible for a
		 * stray GTT write to corrupt our defaults). Unmap the vma
		 * from the GTT to prevent such accidents and reclaim the
		 * space.
		 */
		err = i915_vma_unbind(state);
		if (err)
			goto err_active;

		i915_gem_object_lock(state->obj);
		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
		i915_gem_object_unlock(state->obj);
		if (err)
			goto err_active;

		engine->default_state = i915_gem_object_get(state->obj);
		i915_gem_object_set_cache_coherency(engine->default_state,
						    I915_CACHE_LLC);

		/* Check we can acquire the image of the context state */
		vaddr = i915_gem_object_pin_map(engine->default_state,
						I915_MAP_FORCE_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_active;
		}

		i915_gem_object_unpin_map(engine->default_state);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		unsigned int found = intel_engines_has_context_isolation(i915);

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		for_each_engine(engine, i915, id) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((found & bit) != expected) {
				DRM_ERROR("mismatching default context state for class %d on engine %s\n",
					  engine->uabi_class, engine->name);
			}
		}
	}

out_ctx:
	i915_gem_context_unlock_engines(ctx);
	i915_gem_context_set_closed(ctx);
	i915_gem_context_put(ctx);
	return err;

err_active:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	i915_gem_set_wedged(i915);
	goto out_ctx;
}

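/*
 * Allocate the GT scratch page (preferring stolen memory, falling back to an
 * internal object) and pin it high in the global GTT.
 */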
static int
i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_stolen(i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	i915->gt.scratch = vma;
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void i915_gem_fini_scratch(struct drm_i915_private *i915)
{
	i915_vma_unpin_and_release(&i915->gt.scratch, 0);
}

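/*
 * In CONFIG_DRM_I915_DEBUG_GEM builds, re-verify after loading that the
 * per-engine workarounds are still applied; returns -EIO if any check fails.
 */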
static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, i915, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	return err;
}

int i915_gem_init(struct drm_i915_private *dev_priv)
{
	int ret;

	/* We need to fallback to 4K pages if host doesn't support huge gtt. */
	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		mkwrite_device_info(dev_priv)->page_sizes =
			I915_GTT_PAGE_SIZE_4K;

	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);

	i915_timelines_init(dev_priv);

	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		return ret;

	ret = intel_uc_init_misc(dev_priv);
	if (ret)
		return ret;

	ret = intel_wopcm_init(&dev_priv->wopcm);
	if (ret)
		goto err_uc_misc;

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	ret = i915_gem_init_scratch(dev_priv,
				    IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_ggtt;
	}
	ret = intel_engines_setup(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	ret = i915_gem_contexts_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_scratch;
	}

	ret = intel_engines_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_context;
	}
	intel_init_gt_powersave(dev_priv);

	ret = intel_uc_init(dev_priv);
	if (ret)
		goto err_pm;

	ret = i915_gem_init_hw(dev_priv);
	if (ret)
		goto err_uc_init;

1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590
	/*
	 * Despite its name intel_init_clock_gating applies both display
	 * clock gating workarounds; GT mmio workarounds and the occasional
	 * GT power context workaround. Worse, sometimes it includes a context
	 * register workaround which we need to apply before we record the
	 * default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_init_clock_gating(dev_priv);

	ret = intel_engines_verify_workarounds(dev_priv);
	if (ret)
		goto err_init_hw;

	ret = __intel_engines_record_defaults(dev_priv);
	if (ret)
		goto err_init_hw;

	if (i915_inject_load_failure()) {
		ret = -ENODEV;
		goto err_init_hw;
	}

	if (i915_inject_load_failure()) {
		ret = -EIO;
		goto err_init_hw;
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;

	/*
	 * Unwinding is complicated by that we want to handle -EIO to mean
	 * disable GPU submission but keep KMS alive. We want to mark the
	 * HW as irrevocably wedged, but keep enough state around that the
	 * driver doesn't explode during runtime.
	 */
err_init_hw:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_set_wedged(dev_priv);
	i915_gem_suspend(dev_priv);
	i915_gem_suspend_late(dev_priv);

	i915_gem_drain_workqueue(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
err_uc_init:
	intel_uc_fini(dev_priv);
err_pm:
	if (ret != -EIO) {
		intel_cleanup_gt_powersave(dev_priv);
		intel_engines_cleanup(dev_priv);
	}
err_context:
	if (ret != -EIO)
		i915_gem_contexts_fini(dev_priv);
err_scratch:
	i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

err_uc_misc:
	intel_uc_fini_misc(dev_priv);

	if (ret != -EIO) {
		i915_gem_cleanup_userptr(dev_priv);
		i915_timelines_fini(dev_priv);
	}

	if (ret == -EIO) {
		mutex_lock(&dev_priv->drm.struct_mutex);

		/*
		 * Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		if (!i915_reset_failed(dev_priv)) {
			i915_load_error(dev_priv,
					"Failed to initialize GPU, declaring it wedged!\n");
			i915_gem_set_wedged(dev_priv);
		}

		/* Minimal basic recovery for KMS */
		ret = i915_ggtt_enable_hw(dev_priv);
		i915_gem_restore_gtt_mappings(dev_priv);
		i915_gem_restore_fences(dev_priv);
		intel_init_clock_gating(dev_priv);

		mutex_unlock(&dev_priv->drm.struct_mutex);
1677 1678
	}

	i915_gem_drain_freed_objects(dev_priv);
	return ret;
}

void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
{
	GEM_BUG_ON(dev_priv->gt.awake);

	intel_wakeref_auto_fini(&dev_priv->mm.userfault_wakeref);

	i915_gem_suspend_late(dev_priv);
	intel_disable_gt_powersave(dev_priv);

	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
	intel_uc_fini(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_drain_freed_objects(dev_priv);
}

void i915_gem_fini(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_engines_cleanup(dev_priv);
	i915_gem_contexts_fini(dev_priv);
	i915_gem_fini_scratch(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_wa_list_free(&dev_priv->gt_wa_list);

	intel_cleanup_gt_powersave(dev_priv);

	intel_uc_fini_misc(dev_priv);
	i915_gem_cleanup_userptr(dev_priv);
	i915_timelines_fini(dev_priv);

	i915_gem_drain_freed_objects(dev_priv);

	WARN_ON(!list_empty(&dev_priv->contexts.list));
}

void i915_gem_init_mmio(struct drm_i915_private *i915)
{
	i915_gem_sanitize(i915);
}

void
i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
{
	int i;

	if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) &&
	    !IS_CHERRYVIEW(dev_priv))
		dev_priv->num_fence_regs = 32;
	else if (INTEL_GEN(dev_priv) >= 4 ||
		 IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
		 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
		dev_priv->num_fence_regs = 16;
	else
		dev_priv->num_fence_regs = 8;

	if (intel_vgpu_active(dev_priv))
		dev_priv->num_fence_regs =
				I915_READ(vgtif_reg(avail_rs.fence_num));

	/* Initialize fence registers to zero */
	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

		fence->i915 = dev_priv;
		fence->id = i;
		list_add_tail(&fence->link, &dev_priv->mm.fence_list);
	}
	i915_gem_restore_fences(dev_priv);

	i915_gem_detect_bit_6_swizzle(dev_priv);
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mm.obj_lock);
	spin_lock_init(&i915->mm.free_lock);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.purge_list);
	INIT_LIST_HEAD(&i915->mm.unbound_list);
	INIT_LIST_HEAD(&i915->mm.bound_list);
	INIT_LIST_HEAD(&i915->mm.fence_list);

	INIT_LIST_HEAD(&i915->mm.userfault_list);
	intel_wakeref_auto_init(&i915->mm.userfault_wakeref, i915);

	i915_gem_init__objects(i915);
}

int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
	int err;

	intel_gt_pm_init(dev_priv);

	INIT_LIST_HEAD(&dev_priv->gt.active_rings);
	INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
	spin_lock_init(&dev_priv->gt.closed_lock);

	i915_gem_init__mm(dev_priv);
	i915_gem_init__pm(dev_priv);

	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
	mutex_init(&dev_priv->gpu_error.wedge_mutex);
	init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);

	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

	spin_lock_init(&dev_priv->fb_tracking.lock);

	err = i915_gemfs_init(dev_priv);
	if (err)
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);

	return 0;
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_freed_objects(dev_priv);
	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
	WARN_ON(dev_priv->mm.shrink_count);

	cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);

	i915_gemfs_fini(dev_priv);
}

int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	/* Discard all purgeable objects, let userspace recover those as
	 * required after resuming.
	 */
	i915_gem_shrink_all(dev_priv);

	return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	struct list_head *phases[] = {
		&i915->mm.unbound_list,
		&i915->mm.bound_list,
		NULL
	}, **phase;

	/*
	 * Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
1849 1850
	 *
	 * To try and reduce the hibernation image, we manually shrink
1851
	 * the objects as well, see i915_gem_freeze()
1852 1853
	 */

1854 1855
	i915_gem_shrink(i915, -1UL, NULL, I915_SHRINK_UNBOUND);
	i915_gem_drain_freed_objects(i915);
1856

1857
	for (phase = phases; *phase; phase++) {
1858 1859
		list_for_each_entry(obj, *phase, mm.link) {
			i915_gem_object_lock(obj);
1860
			WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
1861 1862
			i915_gem_object_unlock(obj);
		}
1863
	}
1864
	GEM_BUG_ON(!list_empty(&i915->mm.purge_list));
1865 1866 1867 1868

	return 0;
}

1869
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
1870
{
1871
	struct drm_i915_file_private *file_priv = file->driver_priv;
1872
	struct i915_request *request;
1873 1874 1875 1876 1877

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
1878
	spin_lock(&file_priv->mm.lock);
1879
	list_for_each_entry(request, &file_priv->mm.request_list, client_link)
1880
		request->file_priv = NULL;
1881
	spin_unlock(&file_priv->mm.lock);
1882 1883
}

1884
int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
1885 1886
{
	struct drm_i915_file_private *file_priv;
1887
	int ret;
1888

1889
	DRM_DEBUG("\n");
1890 1891 1892 1893 1894 1895

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
1896
	file_priv->dev_priv = i915;
1897
	file_priv->file = file;
1898 1899 1900 1901

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

1902
	file_priv->bsd_engine = -1;
1903
	file_priv->hang_timestamp = jiffies;
1904

1905
	ret = i915_gem_context_open(i915, file);
1906 1907
	if (ret)
		kfree(file_priv);
1908

1909
	return ret;
1910 1911
}

1912 1913
/**
 * i915_gem_track_fb - update frontbuffer tracking
1914 1915 1916
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
1917 1918 1919 1920
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
1921 1922 1923 1924
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
1925 1926 1927 1928 1929 1930 1931
	/* Control of individual bits within the mask are guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     BITS_PER_TYPE(atomic_t));

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif