/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <drm/drm_vma_manager.h>
#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/reservation.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/mman.h>

#include "display/intel_display.h"
#include "display/intel_frontbuffer.h"

#include "gem/i915_gem_clflush.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_pm.h"
#include "gem/i915_gemfs.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_gt_pm.h"
#include "gt/intel_mocs.h"
#include "gt/intel_reset.h"
#include "gt/intel_workarounds.h"

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
#include "i915_vgpu.h"

#include "intel_drv.h"
#include "intel_pm.h"

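/*
 * Reserve a node in the mappable half of the global GTT (the pread/pwrite
 * slow paths below use a single page) as a temporary window for CPU access
 * through the aperture when the object cannot be pinned there directly.
 */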
static int
insert_mappable_node(struct i915_ggtt *ggtt,
                     struct drm_mm_node *node, u32 size)
{
	memset(node, 0, sizeof(*node));
	return drm_mm_insert_node_in_range(&ggtt->vm.mm, node,
					   size, 0, I915_COLOR_UNEVICTABLE,
					   0, ggtt->mappable_end,
					   DRM_MM_INSERT_LOW);
}

static void
remove_mappable_node(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);
}

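/*
 * Report the total size of the global GTT and how much of it is still
 * available (i.e. not reserved or pinned) to userspace.
 */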
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *file)
{
	struct i915_ggtt *ggtt = &to_i915(dev)->ggtt;
	struct drm_i915_gem_get_aperture *args = data;
	struct i915_vma *vma;
	u64 pinned;

	mutex_lock(&ggtt->vm.mutex);

	pinned = ggtt->vm.reserved;
	list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
		if (i915_vma_is_pinned(vma))
			pinned += vma->node.size;

	mutex_unlock(&ggtt->vm.mutex);

	args->aper_size = ggtt->vm.total;
	args->aper_available_size = args->aper_size - pinned;

	return 0;
}

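/*
 * Unbind every VMA belonging to the object, temporarily moving each one onto
 * a private list so the object's vma list can be walked safely while the
 * spinlock is dropped around i915_vma_unbind().
 */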
int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;
	LIST_HEAD(still_in_list);
	int ret = 0;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	spin_lock(&obj->vma.lock);
	while (!ret && (vma = list_first_entry_or_null(&obj->vma.list,
						       struct i915_vma,
						       obj_link))) {
		list_move_tail(&vma->obj_link, &still_in_list);
		spin_unlock(&obj->vma.lock);

		ret = i915_vma_unbind(vma);

		spin_lock(&obj->vma.lock);
	}
	list_splice(&still_in_list, &obj->vma.list);
	spin_unlock(&obj->vma.lock);

	return ret;
}

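/*
 * Write path for "phys" objects backed by contiguous system memory: copy
 * straight into the backing store, then clflush and flush the chipset so
 * the GPU observes the new data.
 */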
static int
i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(obj->base.dev));

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	return 0;
}

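/*
 * Common helper for object creation: round the requested size up to a page
 * multiple, allocate a shmem-backed object and return a new handle for it.
 */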
static int
i915_gem_create(struct drm_file *file,
		struct drm_i915_private *dev_priv,
		u64 *size_p,
		u32 *handle_p)
{
	struct drm_i915_gem_object *obj;
	u32 handle;
	u64 size;
	int ret;

	size = round_up(*size_p, PAGE_SIZE);
	if (size == 0)
		return -EINVAL;

	/* Allocate the new object */
	obj = i915_gem_object_create_shmem(dev_priv, size);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, &obj->base, &handle);
	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	*handle_p = handle;
	*size_p = size;
	return 0;
}

int
i915_gem_dumb_create(struct drm_file *file,
		     struct drm_device *dev,
		     struct drm_mode_create_dumb *args)
{
	int cpp = DIV_ROUND_UP(args->bpp, 8);
	u32 format;

	switch (cpp) {
	case 1:
		format = DRM_FORMAT_C8;
		break;
	case 2:
		format = DRM_FORMAT_RGB565;
		break;
	case 4:
		format = DRM_FORMAT_XRGB8888;
		break;
	default:
		return -EINVAL;
	}

	/* have to work out size/pitch and return them */
	args->pitch = ALIGN(args->width * cpp, 64);

	/* align stride to page size so that we can remap */
	if (args->pitch > intel_plane_fb_max_stride(to_i915(dev), format,
						    DRM_FORMAT_MOD_LINEAR))
		args->pitch = ALIGN(args->pitch, 4096);

	args->size = args->pitch * args->height;
	return i915_gem_create(file, to_i915(dev),
			       &args->size, &args->handle);
}

/**
 * Creates a new mm object and returns a handle to it.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 */
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_create *args = data;

	i915_gem_flush_free_objects(dev_priv);

	return i915_gem_create(file, dev_priv,
			       &args->size, &args->handle);
}

void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
{
	intel_wakeref_t wakeref;

	/*
	 * No actual flushing is required for the GTT write domain for reads
	 * from the GTT domain. Writes to it "immediately" go to main memory
	 * as far as we know, so there's no chipset flush. It also doesn't
	 * land in the GPU render cache.
	 *
	 * However, we do have to enforce the order so that all writes through
	 * the GTT land before any writes to the device, such as updates to
	 * the GATT itself.
	 *
	 * We also have to wait a bit for the writes to land from the GTT.
	 * An uncached read (i.e. mmio) seems to be ideal for the round-trip
	 * timing. This issue has only been observed when switching quickly
	 * between GTT writes and CPU reads from inside the kernel on recent hw,
	 * and it appears to only affect discrete GTT blocks (i.e. on LLC
	 * system agents we cannot reproduce this behaviour, until Cannonlake
	 * that was!).
	 */

	wmb();

	if (INTEL_INFO(dev_priv)->has_coherent_ggtt)
		return;

	i915_gem_chipset_flush(dev_priv);

	with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
		struct intel_uncore *uncore = &dev_priv->uncore;

		spin_lock_irq(&uncore->lock);
		intel_uncore_posting_read_fw(uncore,
					     RING_HEAD(RENDER_RING_BASE));
		spin_unlock_irq(&uncore->lock);
	}
}

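/*
 * Per-page copy function for the shmem pread fastpath (the counterpart of
 * shmem_pwrite below): clflush the source range first if the object is not
 * coherent with the CPU cache, then copy out to userspace.
 */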
static int
shmem_pread(struct page *page, int offset, int len, char __user *user_data,
	    bool needs_clflush)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_to_user(user_data, vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pread *args)
{
	unsigned int needs_clflush;
	unsigned int idx, offset;
	struct dma_fence *fence;
	char __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_read(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	remain = args->size;
	user_data = u64_to_user_ptr(args->data_ptr);
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pread(page, offset, length, user_data,
				  needs_clflush);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	i915_gem_object_unlock_fence(obj, fence);
	return ret;
}

static inline bool
gtt_user_read(struct io_mapping *mapping,
	      loff_t base, int offset,
	      char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_to_user_inatomic(user_data,
					    (void __force *)vaddr + offset,
					    length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_to_user(user_data,
					 (void __force *)vaddr + offset,
					 length);
		io_mapping_unmap(vaddr);
	}
	return unwritten;
}

static int
i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
		   const struct drm_i915_gem_pread *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	void __user *user_data;
	struct i915_vma *vma;
	u64 remain, offset;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_unlock;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, false);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = args->offset;

	while (remain > 0) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned page_offset = offset_in_page(offset);
		unsigned page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb();
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb();
		} else {
			page_base += offset & PAGE_MASK;
		}

		if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
				  user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_unlock:
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
	mutex_unlock(&i915->drm.struct_mutex);

	return ret;
}

/**
 * Reads data from the object referenced by handle.
 * @dev: drm device pointer
 * @data: ioctl data blob
 * @file: drm file pointer
 *
 * On error, the contents of *data are undefined.
 */
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file)
{
	struct drm_i915_gem_pread *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr),
		       args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check source.  */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto out;
	}

	trace_i915_gem_object_pread(obj, args->offset, args->size);

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto out;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto out;

	ret = i915_gem_shmem_pread(obj, args);
	if (ret == -EFAULT || ret == -ENODEV)
		ret = i915_gem_gtt_pread(obj, args);

	i915_gem_object_unpin_pages(obj);
out:
	i915_gem_object_put(obj);
	return ret;
}

/* This is the fast write path which cannot handle
 * page faults in the source data
 */

static inline bool
ggtt_write(struct io_mapping *mapping,
	   loff_t base, int offset,
	   char __user *user_data, int length)
{
	void __iomem *vaddr;
	unsigned long unwritten;

	/* We can use the cpu mem copy function because this is X86. */
	vaddr = io_mapping_map_atomic_wc(mapping, base);
	unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
						      user_data, length);
	io_mapping_unmap_atomic(vaddr);
	if (unwritten) {
		vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
		unwritten = copy_from_user((void __force *)vaddr + offset,
					   user_data, length);
		io_mapping_unmap(vaddr);
	}

	return unwritten;
}

/**
 * This is the fast pwrite path, where we copy the data directly from the
 * user into the GTT, uncached.
 * @obj: i915 GEM object
 * @args: pwrite arguments structure
 */
static int
i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
			 const struct drm_i915_gem_pwrite *args)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;
	struct drm_mm_node node;
	struct dma_fence *fence;
	struct i915_vma *vma;
	u64 remain, offset;
	void __user *user_data;
	int ret;

	ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
	if (ret)
		return ret;

	if (i915_gem_object_has_struct_page(obj)) {
		/*
		 * Avoid waking the device up if we can fallback, as
		 * waking/resuming is very slow (worst-case 10-100 ms
		 * depending on PCI sleeps and our own resume time).
		 * This easily dwarfs any performance advantage from
		 * using the cache bypass of indirect GGTT access.
		 */
		wakeref = intel_runtime_pm_get_if_in_use(rpm);
		if (!wakeref) {
			ret = -EFAULT;
			goto out_unlock;
		}
	} else {
		/* No backing pages, no fallback, we must force GGTT access */
		wakeref = intel_runtime_pm_get(rpm);
	}

	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
				       PIN_MAPPABLE |
				       PIN_NONFAULT |
				       PIN_NONBLOCK);
	if (!IS_ERR(vma)) {
		node.start = i915_ggtt_offset(vma);
		node.allocated = false;
		ret = i915_vma_put_fence(vma);
		if (ret) {
			i915_vma_unpin(vma);
			vma = ERR_PTR(ret);
		}
	}
	if (IS_ERR(vma)) {
		ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
		if (ret)
			goto out_rpm;
		GEM_BUG_ON(!node.allocated);
	}

	mutex_unlock(&i915->drm.struct_mutex);

	ret = i915_gem_object_lock_interruptible(obj);
	if (ret)
		goto out_unpin;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret) {
		i915_gem_object_unlock(obj);
		goto out_unpin;
	}

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_unlock(obj);
	if (!fence) {
		ret = -ENOMEM;
		goto out_unpin;
	}

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);

	user_data = u64_to_user_ptr(args->data_ptr);
	offset = args->offset;
	remain = args->size;
	while (remain) {
		/* Operation in this page
		 *
		 * page_base = page offset within aperture
		 * page_offset = offset within page
		 * page_length = bytes to copy for this page
		 */
		u32 page_base = node.start;
		unsigned int page_offset = offset_in_page(offset);
		unsigned int page_length = PAGE_SIZE - page_offset;
		page_length = remain < page_length ? remain : page_length;
		if (node.allocated) {
			wmb(); /* flush the write before we modify the GGTT */
			ggtt->vm.insert_page(&ggtt->vm,
					     i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
					     node.start, I915_CACHE_NONE, 0);
			wmb(); /* flush modifications to the GGTT (insert_page) */
		} else {
			page_base += offset & PAGE_MASK;
		}
		/* If we get a fault while copying data, then (presumably) our
		 * source page isn't available.  Return the error and we'll
		 * retry in the slow path.
		 * If the object is non-shmem backed, we retry again with the
		 * path that handles page fault.
		 */
		if (ggtt_write(&ggtt->iomap, page_base, page_offset,
			       user_data, page_length)) {
			ret = -EFAULT;
			break;
		}

		remain -= page_length;
		user_data += page_length;
		offset += page_length;
	}
	intel_fb_obj_flush(obj, ORIGIN_CPU);

	i915_gem_object_unlock_fence(obj, fence);
out_unpin:
	mutex_lock(&i915->drm.struct_mutex);
	if (node.allocated) {
		wmb();
		ggtt->vm.clear_range(&ggtt->vm, node.start, node.size);
		remove_mappable_node(&node);
	} else {
		i915_vma_unpin(vma);
	}
out_rpm:
	intel_runtime_pm_put(rpm, wakeref);
out_unlock:
	mutex_unlock(&i915->drm.struct_mutex);
	return ret;
}

/* Per-page copy function for the shmem pwrite fastpath.
 * Flushes invalid cachelines before writing to the target if
 * needs_clflush_before is set and flushes out any written cachelines after
 * writing if needs_clflush is set.
 */
static int
shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
	     bool needs_clflush_before,
	     bool needs_clflush_after)
{
	char *vaddr;
	int ret;

	vaddr = kmap(page);

	if (needs_clflush_before)
		drm_clflush_virt_range(vaddr + offset, len);

	ret = __copy_from_user(vaddr + offset, user_data, len);
	if (!ret && needs_clflush_after)
		drm_clflush_virt_range(vaddr + offset, len);

	kunmap(page);

	return ret ? -EFAULT : 0;
}

static int
i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
		      const struct drm_i915_gem_pwrite *args)
{
	unsigned int partial_cacheline_write;
	unsigned int needs_clflush;
	unsigned int offset, idx;
	struct dma_fence *fence;
	void __user *user_data;
	u64 remain;
	int ret;

	ret = i915_gem_object_prepare_write(obj, &needs_clflush);
	if (ret)
		return ret;

	fence = i915_gem_object_lock_fence(obj);
	i915_gem_object_finish_access(obj);
	if (!fence)
		return -ENOMEM;

	/* If we don't overwrite a cacheline completely we need to be
	 * careful to have up-to-date data by first clflushing. Don't
	 * overcomplicate things and flush the entire patch.
	 */
	partial_cacheline_write = 0;
	if (needs_clflush & CLFLUSH_BEFORE)
		partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;

	user_data = u64_to_user_ptr(args->data_ptr);
	remain = args->size;
	offset = offset_in_page(args->offset);
	for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
		struct page *page = i915_gem_object_get_page(obj, idx);
		unsigned int length = min_t(u64, remain, PAGE_SIZE - offset);

		ret = shmem_pwrite(page, offset, length, user_data,
				   (offset | length) & partial_cacheline_write,
				   needs_clflush & CLFLUSH_AFTER);
		if (ret)
			break;

		remain -= length;
		user_data += length;
		offset = 0;
	}

	intel_fb_obj_flush(obj, ORIGIN_CPU);
	i915_gem_object_unlock_fence(obj, fence);

	return ret;
}

/**
 * Writes data to the object referenced by handle.
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 *
 * On error, the contents of the buffer that were to be modified are undefined.
 */
int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file)
{
	struct drm_i915_gem_pwrite *args = data;
	struct drm_i915_gem_object *obj;
	int ret;

	if (args->size == 0)
		return 0;

	if (!access_ok(u64_to_user_ptr(args->data_ptr), args->size))
		return -EFAULT;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/* Bounds check destination. */
	if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
		ret = -EINVAL;
		goto err;
	}

	/* Writes not allowed into this read-only object */
	if (i915_gem_object_is_readonly(obj)) {
		ret = -EINVAL;
		goto err;
	}

	trace_i915_gem_object_pwrite(obj, args->offset, args->size);

	ret = -ENODEV;
	if (obj->ops->pwrite)
		ret = obj->ops->pwrite(obj, args);
	if (ret != -ENODEV)
		goto err;

	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (ret)
		goto err;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	ret = -EFAULT;
	/* We can only do the GTT pwrite on untiled buffers, as otherwise
	 * it would end up going through the fenced access, and we'll get
	 * different detiling behavior between reading and writing.
	 * pread/pwrite currently are reading and writing from the CPU
	 * perspective, requiring manual detiling by the client.
	 */
	if (!i915_gem_object_has_struct_page(obj) ||
	    cpu_write_needs_clflush(obj))
		/* Note that the gtt paths might fail with non-page-backed user
		 * pointers (e.g. gtt mappings when moving data between
		 * textures). Fallback to the shmem path in that case.
		 */
		ret = i915_gem_gtt_pwrite_fast(obj, args);

	if (ret == -EFAULT || ret == -ENOSPC) {
		if (obj->phys_handle)
			ret = i915_gem_phys_pwrite(obj, args, file);
		else
			ret = i915_gem_shmem_pwrite(obj, args);
	}

	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return ret;
}

/**
 * Called when user space has done writes to this buffer
 * @dev: drm device
 * @data: ioctl data blob
 * @file: drm file
 */
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_gem_sw_finish *args = data;
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_lookup(file, args->handle);
	if (!obj)
		return -ENOENT;

	/*
	 * Proxy objects are barred from CPU access, so there is no
	 * need to ban sw_finish as it is a nop.
	 */

	/* Pinned buffers may be scanout, so flush the cache */
	i915_gem_object_flush_if_display(obj);
	i915_gem_object_put(obj);

	return 0;
}

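/*
 * Runtime-PM suspend hook: revoke all userspace GGTT mmaps and mark the
 * fence registers as dirty so they are restored on first use after resume.
 */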
void i915_gem_runtime_suspend(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj, *on;
	int i;

	/*
	 * Only called during RPM suspend. All users of the userfault_list
	 * must be holding an RPM wakeref to ensure that this can not
	 * run concurrently with themselves (and use the struct_mutex for
	 * protection between themselves).
	 */

	list_for_each_entry_safe(obj, on,
				 &i915->ggtt.userfault_list, userfault_link)
		__i915_gem_object_release_mmap(obj);

	/*
	 * The fence will be lost when the device powers down. If any were
	 * in use by hardware (i.e. they are pinned), we should not be powering
	 * down! All other fences will be reacquired by the user upon waking.
	 */
	for (i = 0; i < i915->ggtt.num_fences; i++) {
		struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];

		/*
		 * Ideally we want to assert that the fence register is not
		 * live at this point (i.e. that no piece of code will be
		 * trying to write through fence + GTT, as that both violates
		 * our tracking of activity and associated locking/barriers,
		 * but also is illegal given that the hw is powered down).
		 *
		 * Previously we used reg->pin_count as a "liveness" indicator.
		 * That is not sufficient, and we need a more fine-grained
		 * tool if we want to have a sanity check here.
		 */

		if (!reg->vma)
			continue;

		GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
		reg->dirty = true;
	}
}

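/*
 * Last-resort check that the engines really have gone idle; if they have
 * not, dump the trace and declare the GPU wedged rather than hang.
 */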
static int wait_for_engines(struct drm_i915_private *i915)
{
	if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
		dev_err(i915->drm.dev,
			"Failed to idle engines, declaring wedged!\n");
		GEM_TRACE_DUMP();
		i915_gem_set_wedged(i915);
		return -EIO;
	}

	return 0;
}

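/*
 * Wait for the last request on every active timeline, dropping the timeline
 * lock around each individual wait and restarting the walk afterwards.
 */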
static long
wait_for_timelines(struct drm_i915_private *i915,
		   unsigned int flags, long timeout)
{
	struct i915_gt_timelines *gt = &i915->gt.timelines;
	struct i915_timeline *tl;

	mutex_lock(&gt->mutex);
	list_for_each_entry(tl, &gt->active_list, link) {
		struct i915_request *rq;

		rq = i915_active_request_get_unlocked(&tl->last_request);
		if (!rq)
			continue;

		mutex_unlock(&gt->mutex);

		/*
		 * "Race-to-idle".
		 *
		 * Switching to the kernel context is often used as a synchronous
		 * step prior to idling, e.g. in suspend for flushing all
		 * current operations to memory before sleeping. These we
		 * want to complete as quickly as possible to avoid prolonged
		 * stalls, so allow the gpu to boost to maximum clocks.
		 */
		if (flags & I915_WAIT_FOR_IDLE_BOOST)
			gen6_rps_boost(rq);

		timeout = i915_request_wait(rq, flags, timeout);
		i915_request_put(rq);
		if (timeout < 0)
			return timeout;

		/* restart after reacquiring the lock */
		mutex_lock(&gt->mutex);
		tl = list_entry(&gt->active_list, typeof(*tl), link);
	}
	mutex_unlock(&gt->mutex);

	return timeout;
}

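/*
 * Flush all outstanding requests; when called with the struct_mutex held
 * (I915_WAIT_LOCKED) also retire them and verify the engines went idle.
 */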
int i915_gem_wait_for_idle(struct drm_i915_private *i915,
			   unsigned int flags, long timeout)
{
	GEM_TRACE("flags=%x (%s), timeout=%ld%s, awake?=%s\n",
		  flags, flags & I915_WAIT_LOCKED ? "locked" : "unlocked",
		  timeout, timeout == MAX_SCHEDULE_TIMEOUT ? " (forever)" : "",
		  yesno(i915->gt.awake));

	/* If the device is asleep, we have no requests outstanding */
	if (!READ_ONCE(i915->gt.awake))
		return 0;

	timeout = wait_for_timelines(i915, flags, timeout);
	if (timeout < 0)
		return timeout;

	if (flags & I915_WAIT_LOCKED) {
		int err;

		lockdep_assert_held(&i915->drm.struct_mutex);

		err = wait_for_engines(i915);
		if (err)
			return err;

		i915_retire_requests(i915);
	}

	return 0;
}

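/*
 * Pin an object into the global GTT (optionally through an alternate view),
 * unbinding and rebinding the vma if its current placement does not satisfy
 * the requested size, alignment or placement flags.
 */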
struct i915_vma *
i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
			 const struct i915_ggtt_view *view,
			 u64 size,
			 u64 alignment,
			 u64 flags)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct i915_address_space *vm = &dev_priv->ggtt.vm;
	struct i915_vma *vma;
	int ret;

	lockdep_assert_held(&obj->base.dev->struct_mutex);

	if (flags & PIN_MAPPABLE &&
	    (!view || view->type == I915_GGTT_VIEW_NORMAL)) {
		/* If the required space is larger than the available
		 * aperture, we will not be able to find a slot for the
		 * object and unbinding the object now will be in
		 * vain. Worse, doing so may cause us to ping-pong
		 * the object in and out of the Global GTT and
		 * waste a lot of cycles under the mutex.
		 */
		if (obj->base.size > dev_priv->ggtt.mappable_end)
			return ERR_PTR(-E2BIG);

		/* If NONBLOCK is set the caller is optimistically
		 * trying to cache the full object within the mappable
		 * aperture, and *must* have a fallback in place for
		 * situations where we cannot bind the object. We
		 * can be a little more lax here and use the fallback
		 * more often to avoid costly migrations of ourselves
		 * and other objects within the aperture.
		 *
		 * Half-the-aperture is used as a simple heuristic.
		 * More interesting would be to search for a free
		 * block prior to making the commitment to unbind.
		 * That caters for the self-harm case, and with a
		 * little more heuristics (e.g. NOFAULT, NOEVICT)
		 * we could try to minimise harm to others.
		 */
		if (flags & PIN_NONBLOCK &&
		    obj->base.size > dev_priv->ggtt.mappable_end / 2)
			return ERR_PTR(-ENOSPC);
	}

	vma = i915_vma_instance(obj, vm, view);
	if (IS_ERR(vma))
		return vma;

	if (i915_vma_misplaced(vma, size, alignment, flags)) {
		if (flags & PIN_NONBLOCK) {
			if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
				return ERR_PTR(-ENOSPC);

			if (flags & PIN_MAPPABLE &&
			    vma->fence_size > dev_priv->ggtt.mappable_end / 2)
				return ERR_PTR(-ENOSPC);
		}

		WARN(i915_vma_is_pinned(vma),
		     "bo is already pinned in ggtt with incorrect alignment:"
		     " offset=%08x, req.alignment=%llx,"
		     " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
		     i915_ggtt_offset(vma), alignment,
		     !!(flags & PIN_MAPPABLE),
		     i915_vma_is_map_and_fenceable(vma));
		ret = i915_vma_unbind(vma);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
	if (ret)
		return ERR_PTR(ret);

	return vma;
}

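/*
 * Update the madvise (purgeability) state of an object: DONTNEED objects may
 * have their backing storage discarded by the shrinker under memory pressure.
 */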
int
i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv)
{
	struct drm_i915_private *i915 = to_i915(dev);
	struct drm_i915_gem_madvise *args = data;
	struct drm_i915_gem_object *obj;
	int err;

	switch (args->madv) {
	case I915_MADV_DONTNEED:
	case I915_MADV_WILLNEED:
	    break;
	default:
	    return -EINVAL;
	}

	obj = i915_gem_object_lookup(file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	err = mutex_lock_interruptible(&obj->mm.lock);
	if (err)
		goto out;

	if (i915_gem_object_has_pages(obj) &&
	    i915_gem_object_is_tiled(obj) &&
	    i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
		if (obj->mm.madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(!obj->mm.quirked);
			__i915_gem_object_unpin_pages(obj);
			obj->mm.quirked = false;
		}
		if (args->madv == I915_MADV_WILLNEED) {
			GEM_BUG_ON(obj->mm.quirked);
			__i915_gem_object_pin_pages(obj);
			obj->mm.quirked = true;
		}
	}

	if (obj->mm.madv != __I915_MADV_PURGED)
		obj->mm.madv = args->madv;

	if (i915_gem_object_has_pages(obj)) {
		struct list_head *list;

		if (i915_gem_object_is_shrinkable(obj)) {
			unsigned long flags;

			spin_lock_irqsave(&i915->mm.obj_lock, flags);

			if (obj->mm.madv != I915_MADV_WILLNEED)
				list = &i915->mm.purge_list;
			else
				list = &i915->mm.shrink_list;
			list_move_tail(&obj->mm.link, list);

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
		}
	}

	/* if the object is no longer attached, discard its backing storage */
	if (obj->mm.madv == I915_MADV_DONTNEED &&
	    !i915_gem_object_has_pages(obj))
		i915_gem_object_truncate(obj);

	args->retained = obj->mm.madv != __I915_MADV_PURGED;
	mutex_unlock(&obj->mm.lock);

out:
	i915_gem_object_put(obj);
	return err;
}

void i915_gem_sanitize(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;

	GEM_TRACE("\n");

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
	 * back to defaults, recovering from whatever wedged state we left it
	 * in and so worth trying to use the device once more.
	 */
	if (i915_terminally_wedged(i915))
		i915_gem_unset_wedged(i915);

	/*
	 * If we inherit context state from the BIOS or earlier occupants
	 * of the GPU, the GPU may be in an inconsistent state when we
	 * try to take over. The only way to remove the earlier state
	 * is by resetting. However, resetting on earlier gen is tricky as
	 * it may impact the display and we are uncertain about the stability
	 * of the reset, so this could be applied to even earlier gen.
	 */
	intel_gt_sanitize(i915, false);

	intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}

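/*
 * Zero out the control registers of a ring that this kernel never uses, so
 * that a stale HEAD/TAIL pair left over from before resume cannot block
 * C-state entry (see the comment in i915_gem_init_hw()).
 */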
static void init_unused_ring(struct intel_gt *gt, u32 base)
{
	struct intel_uncore *uncore = gt->uncore;

	intel_uncore_write(uncore, RING_CTL(base), 0);
	intel_uncore_write(uncore, RING_HEAD(base), 0);
	intel_uncore_write(uncore, RING_TAIL(base), 0);
	intel_uncore_write(uncore, RING_START(base), 0);
}

static void init_unused_rings(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	if (IS_I830(i915)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
		init_unused_ring(gt, SRB2_BASE);
		init_unused_ring(gt, SRB3_BASE);
	} else if (IS_GEN(i915, 2)) {
		init_unused_ring(gt, SRB0_BASE);
		init_unused_ring(gt, SRB1_BASE);
	} else if (IS_GEN(i915, 3)) {
		init_unused_ring(gt, PRB1_BASE);
		init_unused_ring(gt, PRB2_BASE);
	}
}

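/*
 * (Re)initialise the hardware state that is lost across reset and resume:
 * workarounds, swizzling, PPGTT, WOPCM and GuC/HuC firmware, and finally
 * resume the engines so queued requests can be replayed.
 */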
int i915_gem_init_hw(struct drm_i915_private *dev_priv)
{
	int ret;

	dev_priv->gt.last_init_time = ktime_get();

	/* Double layer security blanket, see i915_gem_init() */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
		I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

	if (IS_HASWELL(dev_priv))
		I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
			   LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);

	/* Apply the GT workarounds... */
	intel_gt_apply_workarounds(&dev_priv->gt);
	/* ...and determine whether they are sticking. */
	intel_gt_verify_workarounds(&dev_priv->gt, "init");

	intel_gt_init_swizzling(&dev_priv->gt);

	/*
	 * At least 830 can leave some of the unused rings
	 * "active" (ie. head != tail) after resume which
	 * will prevent c3 entry. Makes sure all unused rings
	 * are totally idle.
	 */
	init_unused_rings(&dev_priv->gt);

	BUG_ON(!dev_priv->kernel_context);
	ret = i915_terminally_wedged(dev_priv);
	if (ret)
		goto out;

	ret = i915_ppgtt_init_hw(&dev_priv->gt);
	if (ret) {
		DRM_ERROR("Enabling PPGTT failed (%d)\n", ret);
		goto out;
	}

	ret = intel_wopcm_init_hw(&dev_priv->wopcm);
	if (ret) {
		DRM_ERROR("Enabling WOPCM failed (%d)\n", ret);
		goto out;
	}

	/* We can't enable contexts until all firmware is loaded */
	ret = intel_uc_init_hw(dev_priv);
	if (ret) {
		DRM_ERROR("Enabling uc failed (%d)\n", ret);
		goto out;
	}

	intel_mocs_init_l3cc_table(&dev_priv->gt);

	/* Only when the HW is re-initialised, can we replay the requests */
	ret = intel_engines_resume(dev_priv);
	if (ret)
		goto cleanup_uc;

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	intel_engines_set_scheduler_caps(dev_priv);
	return 0;

cleanup_uc:
	intel_uc_fini_hw(dev_priv);
out:
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);

	return ret;
}

static int __intel_engines_record_defaults(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	struct i915_gem_context *ctx;
	struct i915_gem_engines *e;
	enum intel_engine_id id;
	int err = 0;

	/*
	 * As we reset the gpu during very early sanitisation, the current
	 * register state on the GPU should reflect its default values.
	 * We load a context onto the hw (with restore-inhibit), then switch
	 * over to a second context to save that default register state. We
	 * can then prime every new context with that state so they all start
	 * from the same default HW values.
	 */

	ctx = i915_gem_context_create_kernel(i915, 0);
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	e = i915_gem_context_lock_engines(ctx);

	for_each_engine(engine, i915, id) {
		struct intel_context *ce = e->engines[id];
		struct i915_request *rq;

		rq = intel_context_create_request(ce);
		if (IS_ERR(rq)) {
			err = PTR_ERR(rq);
			goto err_active;
		}

		err = 0;
		if (rq->engine->init_context)
			err = rq->engine->init_context(rq);

		i915_request_add(rq);
		if (err)
			goto err_active;
	}

	/* Flush the default context image to memory, and enable powersaving. */
	if (!i915_gem_load_power_context(i915)) {
		err = -EIO;
		goto err_active;
	}

	for_each_engine(engine, i915, id) {
		struct intel_context *ce = e->engines[id];
		struct i915_vma *state = ce->state;
		void *vaddr;

		if (!state)
			continue;

		GEM_BUG_ON(intel_context_is_pinned(ce));

		/*
		 * As we will hold a reference to the logical state, it will
		 * not be torn down with the context, and importantly the
		 * object will hold onto its vma (making it possible for a
		 * stray GTT write to corrupt our defaults). Unmap the vma
		 * from the GTT to prevent such accidents and reclaim the
		 * space.
		 */
		err = i915_vma_unbind(state);
		if (err)
			goto err_active;

		i915_gem_object_lock(state->obj);
		err = i915_gem_object_set_to_cpu_domain(state->obj, false);
		i915_gem_object_unlock(state->obj);
		if (err)
			goto err_active;

		engine->default_state = i915_gem_object_get(state->obj);
		i915_gem_object_set_cache_coherency(engine->default_state,
						    I915_CACHE_LLC);

		/* Check we can acquire the image of the context state */
		vaddr = i915_gem_object_pin_map(engine->default_state,
						I915_MAP_FORCE_WB);
		if (IS_ERR(vaddr)) {
			err = PTR_ERR(vaddr);
			goto err_active;
		}

		i915_gem_object_unpin_map(engine->default_state);
	}

	if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
		unsigned int found = intel_engines_has_context_isolation(i915);

		/*
		 * Make sure that classes with multiple engine instances all
		 * share the same basic configuration.
		 */
		for_each_engine(engine, i915, id) {
			unsigned int bit = BIT(engine->uabi_class);
			unsigned int expected = engine->default_state ? bit : 0;

			if ((found & bit) != expected) {
				DRM_ERROR("mismatching default context state for class %d on engine %s\n",
					  engine->uabi_class, engine->name);
			}
		}
	}

out_ctx:
	i915_gem_context_unlock_engines(ctx);
	i915_gem_context_set_closed(ctx);
	i915_gem_context_put(ctx);
	return err;

err_active:
	/*
	 * If we have to abandon now, we expect the engines to be idle
	 * and ready to be torn-down. The quickest way we can accomplish
	 * this is by declaring ourselves wedged.
	 */
	i915_gem_set_wedged(i915);
	goto out_ctx;
}

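/*
 * Allocate and pin the global scratch vma (from stolen memory if possible,
 * otherwise an internal object) that the GT code uses as scratch space.
 */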
static int
i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
{
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	int ret;

	obj = i915_gem_object_create_stolen(i915, size);
	if (!obj)
		obj = i915_gem_object_create_internal(i915, size);
	if (IS_ERR(obj)) {
		DRM_ERROR("Failed to allocate scratch page\n");
		return PTR_ERR(obj);
	}

	vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_unref;
	}

	ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
	if (ret)
		goto err_unref;

	i915->gt.scratch = vma;
	return 0;

err_unref:
	i915_gem_object_put(obj);
	return ret;
}

static void i915_gem_fini_scratch(struct drm_i915_private *i915)
{
	i915_vma_unpin_and_release(&i915->gt.scratch, 0);
}

static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
		return 0;

	for_each_engine(engine, i915, id) {
		if (intel_engine_verify_workarounds(engine, "load"))
			err = -EIO;
	}

	return err;
}

int i915_gem_init(struct drm_i915_private *dev_priv)
{
	int ret;

	/* We need to fallback to 4K pages if host doesn't support huge gtt. */
	if (intel_vgpu_active(dev_priv) && !intel_vgpu_has_huge_gtt(dev_priv))
		mkwrite_device_info(dev_priv)->page_sizes =
			I915_GTT_PAGE_SIZE_4K;

	dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);

	i915_timelines_init(dev_priv);

	ret = i915_gem_init_userptr(dev_priv);
	if (ret)
		return ret;

	ret = intel_uc_init_misc(dev_priv);
	if (ret)
		return ret;

	ret = intel_wopcm_init(&dev_priv->wopcm);
	if (ret)
		goto err_uc_misc;

	/* This is just a security blanket to placate dragons.
	 * On some systems, we very sporadically observe that the first TLBs
	 * used by the CS may be stale, despite us poking the TLB reset. If
	 * we hold the forcewake during initialisation these problems
	 * just magically go away.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);

	ret = i915_gem_init_ggtt(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	ret = i915_gem_init_scratch(dev_priv,
				    IS_GEN(dev_priv, 2) ? SZ_256K : PAGE_SIZE);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_ggtt;
	}

	ret = intel_engines_setup(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_unlock;
	}

	ret = i915_gem_contexts_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_scratch;
	}

	ret = intel_engines_init(dev_priv);
	if (ret) {
		GEM_BUG_ON(ret == -EIO);
		goto err_context;
	}

	intel_init_gt_powersave(dev_priv);

	ret = intel_uc_init(dev_priv);
	if (ret)
		goto err_pm;

	ret = i915_gem_init_hw(dev_priv);
	if (ret)
		goto err_uc_init;

	/*
	 * Despite its name intel_init_clock_gating applies both display
	 * clock gating workarounds; GT mmio workarounds and the occasional
	 * GT power context workaround. Worse, sometimes it includes a context
	 * register workaround which we need to apply before we record the
	 * default HW state for all contexts.
	 *
	 * FIXME: break up the workarounds and apply them at the right time!
	 */
	intel_init_clock_gating(dev_priv);

	ret = intel_engines_verify_workarounds(dev_priv);
	if (ret)
		goto err_init_hw;

	ret = __intel_engines_record_defaults(dev_priv);
	if (ret)
		goto err_init_hw;

	if (i915_inject_load_failure()) {
		ret = -ENODEV;
		goto err_init_hw;
	}

	if (i915_inject_load_failure()) {
		ret = -EIO;
		goto err_init_hw;
	}

	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	return 0;

	/*
	 * Unwinding is complicated by the fact that we want to handle -EIO
	 * to mean disable GPU submission but keep KMS alive. We want to mark
	 * the HW as irreversibly wedged, but keep enough state around that the
	 * driver doesn't explode during runtime.
	 */
err_init_hw:
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_set_wedged(dev_priv);
	i915_gem_suspend(dev_priv);
	i915_gem_suspend_late(dev_priv);

	i915_gem_drain_workqueue(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
err_uc_init:
	intel_uc_fini(dev_priv);
err_pm:
	if (ret != -EIO) {
		intel_cleanup_gt_powersave(dev_priv);
		intel_engines_cleanup(dev_priv);
	}
err_context:
	if (ret != -EIO)
		i915_gem_contexts_fini(dev_priv);
err_scratch:
	i915_gem_fini_scratch(dev_priv);
err_ggtt:
err_unlock:
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
	mutex_unlock(&dev_priv->drm.struct_mutex);

err_uc_misc:
	intel_uc_fini_misc(dev_priv);

	if (ret != -EIO) {
		i915_gem_cleanup_userptr(dev_priv);
		i915_timelines_fini(dev_priv);
	}

	if (ret == -EIO) {
		mutex_lock(&dev_priv->drm.struct_mutex);

		/*
		 * Allow engine initialisation to fail by marking the GPU as
		 * wedged. But we only want to do this where the GPU is angry,
		 * for all other failure, such as an allocation failure, bail.
		 */
		if (!i915_reset_failed(dev_priv)) {
			i915_load_error(dev_priv,
					"Failed to initialize GPU, declaring it wedged!\n");
			i915_gem_set_wedged(dev_priv);
		}

		/* Minimal basic recovery for KMS */
		ret = i915_ggtt_enable_hw(dev_priv);
		i915_gem_restore_gtt_mappings(dev_priv);
		i915_gem_restore_fences(dev_priv);
		intel_init_clock_gating(dev_priv);

		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	i915_gem_drain_freed_objects(dev_priv);
	return ret;
}

void i915_gem_fini_hw(struct drm_i915_private *dev_priv)
{
	GEM_BUG_ON(dev_priv->gt.awake);

	intel_wakeref_auto_fini(&dev_priv->ggtt.userfault_wakeref);

	i915_gem_suspend_late(dev_priv);
	intel_disable_gt_powersave(dev_priv);

	/* Flush any outstanding unpin_work. */
	i915_gem_drain_workqueue(dev_priv);

	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_uc_fini_hw(dev_priv);
	intel_uc_fini(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	i915_gem_drain_freed_objects(dev_priv);
}

void i915_gem_fini(struct drm_i915_private *dev_priv)
{
	mutex_lock(&dev_priv->drm.struct_mutex);
	intel_engines_cleanup(dev_priv);
	i915_gem_contexts_fini(dev_priv);
	i915_gem_fini_scratch(dev_priv);
	mutex_unlock(&dev_priv->drm.struct_mutex);

	intel_wa_list_free(&dev_priv->gt_wa_list);

	intel_cleanup_gt_powersave(dev_priv);

	intel_uc_fini_misc(dev_priv);
	i915_gem_cleanup_userptr(dev_priv);
	i915_timelines_fini(dev_priv);

	i915_gem_drain_freed_objects(dev_priv);

	WARN_ON(!list_empty(&dev_priv->contexts.list));
}

void i915_gem_init_mmio(struct drm_i915_private *i915)
{
	i915_gem_sanitize(i915);
}

static void i915_gem_init__mm(struct drm_i915_private *i915)
{
	spin_lock_init(&i915->mm.obj_lock);
	spin_lock_init(&i915->mm.free_lock);

	init_llist_head(&i915->mm.free_list);

	INIT_LIST_HEAD(&i915->mm.purge_list);
	INIT_LIST_HEAD(&i915->mm.shrink_list);

	i915_gem_init__objects(i915);
}

int i915_gem_init_early(struct drm_i915_private *dev_priv)
{
	int err;

	i915_gem_init__mm(dev_priv);
	i915_gem_init__pm(dev_priv);

	init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
	init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
	mutex_init(&dev_priv->gpu_error.wedge_mutex);
	init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);

	atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);

	spin_lock_init(&dev_priv->fb_tracking.lock);

	err = i915_gemfs_init(dev_priv);
	if (err)
		DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);

	return 0;
}

void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
{
	i915_gem_drain_freed_objects(dev_priv);
	GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
	GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
	WARN_ON(dev_priv->mm.shrink_count);

	cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);

	i915_gemfs_fini(dev_priv);
}

int i915_gem_freeze(struct drm_i915_private *dev_priv)
{
	/* Discard all purgeable objects, let userspace recover those as
	 * required after resuming.
	 */
	i915_gem_shrink_all(dev_priv);

	return 0;
}

int i915_gem_freeze_late(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	intel_wakeref_t wakeref;

	/*
	 * Called just before we write the hibernation image.
	 *
	 * We need to update the domain tracking to reflect that the CPU
	 * will be accessing all the pages to create and restore from the
	 * hibernation, and so upon restoration those pages will be in the
	 * CPU domain.
	 *
	 * To make sure the hibernation image contains the latest state,
	 * we update that state just before writing out the image.
	 *
	 * To try and reduce the hibernation image, we manually shrink
	 * the objects as well, see i915_gem_freeze()
	 */

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);

	i915_gem_shrink(i915, -1UL, NULL, ~0);
	i915_gem_drain_freed_objects(i915);

	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
		i915_gem_object_lock(obj);
		WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
		i915_gem_object_unlock(obj);
	}

	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return 0;
}

void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv = file->driver_priv;
	struct i915_request *request;

	/* Clean up our request list when the client is going away, so that
	 * later retire_requests won't dereference our soon-to-be-gone
	 * file_priv.
	 */
	spin_lock(&file_priv->mm.lock);
	list_for_each_entry(request, &file_priv->mm.request_list, client_link)
		request->file_priv = NULL;
	spin_unlock(&file_priv->mm.lock);
}

int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
{
	struct drm_i915_file_private *file_priv;
	int ret;

	DRM_DEBUG("\n");

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv)
		return -ENOMEM;

	file->driver_priv = file_priv;
	file_priv->dev_priv = i915;
	file_priv->file = file;

	spin_lock_init(&file_priv->mm.lock);
	INIT_LIST_HEAD(&file_priv->mm.request_list);

	file_priv->bsd_engine = -1;
	file_priv->hang_timestamp = jiffies;

	ret = i915_gem_context_open(i915, file);
	if (ret)
		kfree(file_priv);

	return ret;
}

/**
 * i915_gem_track_fb - update frontbuffer tracking
 * @old: current GEM buffer for the frontbuffer slots
 * @new: new GEM buffer for the frontbuffer slots
 * @frontbuffer_bits: bitmask of frontbuffer slots
 *
 * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
 * from @old and setting them in @new. Both @old and @new can be NULL.
 */
void i915_gem_track_fb(struct drm_i915_gem_object *old,
		       struct drm_i915_gem_object *new,
		       unsigned frontbuffer_bits)
{
	/* Control of individual bits within the mask is guarded by
	 * the owning plane->mutex, i.e. we can never see concurrent
	 * manipulation of individual bits. But since the bitfield as a whole
	 * is updated using RMW, we need to use atomics in order to update
	 * the bits.
	 */
	BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
		     BITS_PER_TYPE(atomic_t));

	if (old) {
		WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
		atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
	}

	if (new) {
		WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
		atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gem_device.c"
#include "selftests/i915_gem.c"
#endif