/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_reset.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view are
 * different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with the _view postfix to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view, a
 * globally const i915_ggtt_view_normal singleton instance exists. All old core
 * GEM API functions (the ones not taking the view parameter) operate on, or
 * with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * Core API is designed to have copy semantics which means that passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */
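
/*
 * Illustrative sketch (not part of the code below): a caller that needs a
 * non-normal view typically fills in a struct i915_ggtt_view and hands it to
 * a view-aware pin helper, e.g.
 *
 *	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_ROTATED };
 *	(fill in view.rotated from the framebuffer layout)
 *	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 *
 * The exact helper, pin flags and view metadata depend on the caller; this
 * only shows where the i915_ggtt_view parameter enters the API.
 */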

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/*
	 * Note that as an uncached mmio write, this will flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags;
	int err;

	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
		err = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start, vma->size);
		if (err)
			return err;
	}

	/* Applicable to VLV, and gen8+ */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

static int ppgtt_set_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->pages);

	vma->pages = vma->obj->mm.pages;

	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

static void clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);

	if (vma->pages != vma->obj->mm.pages) {
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}

static u64 gen8_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;

	if (unlikely(flags & PTE_READ_ONLY))
		pte &= ~_PAGE_RW;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

static u64 snb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 ivb_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static u64 byt_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static u64 hsw_pte_encode(dma_addr_t addr,
			  enum i915_cache_level level,
			  u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static u64 iris_pte_encode(dma_addr_t addr,
			   enum i915_cache_level level,
			   u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

static void stash_init(struct pagestash *stash)
{
	pagevec_init(&stash->pvec);
	spin_lock_init(&stash->lock);
}

static struct page *stash_pop_page(struct pagestash *stash)
{
	struct page *page = NULL;

	spin_lock(&stash->lock);
	if (likely(stash->pvec.nr))
		page = stash->pvec.pages[--stash->pvec.nr];
	spin_unlock(&stash->lock);

	return page;
}

static void stash_push_pagevec(struct pagestash *stash, struct pagevec *pvec)
{
	int nr;

	spin_lock_nested(&stash->lock, SINGLE_DEPTH_NESTING);

	nr = min_t(int, pvec->nr, pagevec_space(&stash->pvec));
	memcpy(stash->pvec.pages + stash->pvec.nr,
	       pvec->pages + pvec->nr - nr,
	       sizeof(pvec->pages[0]) * nr);
	stash->pvec.nr += nr;

	spin_unlock(&stash->lock);

	pvec->nr -= nr;
}

static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct pagevec stack;
	struct page *page;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	page = stash_pop_page(&vm->free_pages);
	if (page)
		return page;

	if (!vm->pt_kmap_wc)
		return alloc_page(gfp);

	/* Look in our global stash of WC pages... */
	page = stash_pop_page(&vm->i915->mm.wc_stash);
	if (page)
		return page;

	/*
	 * Otherwise batch allocate pages to amortize cost of set_pages_wc.
	 *
	 * We have to be careful as page allocation may trigger the shrinker
	 * (via direct reclaim) which will fill up the WC stash underneath us.
	 * So we add our WB pages into a temporary pvec on the stack and merge
	 * them into the WC stash after all the allocations are complete.
	 */
	pagevec_init(&stack);
	do {
		struct page *page;

		page = alloc_page(gfp);
		if (unlikely(!page))
			break;

		stack.pages[stack.nr++] = page;
	} while (pagevec_space(&stack));

	if (stack.nr && !set_pages_array_wc(stack.pages, stack.nr)) {
		page = stack.pages[--stack.nr];

		/* Merge spare WC pages to the global stash */
		stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);

		/* Push any surplus WC pages onto the local VM stash */
		if (stack.nr)
			stash_push_pagevec(&vm->free_pages, &stack);
	}

	/* Return unwanted leftovers */
	if (unlikely(stack.nr)) {
		WARN_ON_ONCE(set_pages_array_wb(stack.pages, stack.nr));
		__pagevec_release(&stack);
	}

	return page;
}

418 419
static void vm_free_pages_release(struct i915_address_space *vm,
				  bool immediate)
420
{
421 422
	struct pagevec *pvec = &vm->free_pages.pvec;
	struct pagevec stack;
423

424
	lockdep_assert_held(&vm->free_pages.lock);
425
	GEM_BUG_ON(!pagevec_count(pvec));
426

427
	if (vm->pt_kmap_wc) {
		/*
		 * When we use WC, first fill up the global stash and then,
		 * only if it is full, immediately free the overflow.
		 */
		stash_push_pagevec(&vm->i915->mm.wc_stash, pvec);
433

		/*
		 * As we have made some room in the VM's free_pages,
		 * we can wait for it to fill again. Unless we are
		 * inside i915_address_space_fini() and must
		 * immediately release the pages!
		 */
		if (pvec->nr <= (immediate ? 0 : PAGEVEC_SIZE - 1))
			return;
442

		/*
		 * We have to drop the lock to allow ourselves to sleep,
		 * so take a copy of the pvec and clear the stash for
		 * others to use it as we sleep.
		 */
		stack = *pvec;
		pagevec_reinit(pvec);
		spin_unlock(&vm->free_pages.lock);

		pvec = &stack;
453
		set_pages_array_wb(pvec->pages, pvec->nr);
454 455

		spin_lock(&vm->free_pages.lock);
456 457 458
	}

	__pagevec_release(pvec);
459 460 461 462
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
463 464 465 466 467 468 469 470
	/*
	 * On !llc, we need to change the pages back to WB. We only do so
	 * in bulk, so we rarely need to change the page attributes here,
	 * but doing so requires a stop_machine() from deep inside arch/x86/mm.
	 * To make detection of the possible sleep more likely, use an
	 * unconditional might_sleep() for everybody.
	 */
	might_sleep();
471 472
	spin_lock(&vm->free_pages.lock);
	if (!pagevec_add(&vm->free_pages.pvec, page))
473
		vm_free_pages_release(vm, false);
474 475 476
	spin_unlock(&vm->free_pages.lock);
}

477
static void i915_address_space_init(struct i915_address_space *vm, int subclass)
478
{
479 480 481 482 483 484
	/*
	 * The vm->mutex must be reclaim safe (for use in the shrinker).
	 * Do a dummy acquire now under fs_reclaim so that any allocation
	 * attempt holding the lock is immediately reported by lockdep.
	 */
	mutex_init(&vm->mutex);
485
	lockdep_set_subclass(&vm->mutex, subclass);
486
	i915_gem_shrinker_taints_mutex(vm->i915, &vm->mutex);
487

488 489 490 491 492 493 494
	GEM_BUG_ON(!vm->total);
	drm_mm_init(&vm->mm, 0, vm->total);
	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	stash_init(&vm->free_pages);

	INIT_LIST_HEAD(&vm->unbound_list);
495
	INIT_LIST_HEAD(&vm->bound_list);
}

static void i915_address_space_fini(struct i915_address_space *vm)
{
	spin_lock(&vm->free_pages.lock);
	if (pagevec_count(&vm->free_pages.pvec))
		vm_free_pages_release(vm, true);
	GEM_BUG_ON(pagevec_count(&vm->free_pages.pvec));
	spin_unlock(&vm->free_pages.lock);

	drm_mm_takedown(&vm->mm);
507 508

	mutex_destroy(&vm->mutex);
509
}
510

511 512 513 514
static int __setup_page_dma(struct i915_address_space *vm,
			    struct i915_page_dma *p,
			    gfp_t gfp)
{
515
	p->page = vm_alloc_page(vm, gfp | I915_GFP_ALLOW_FAIL);
516 517
	if (unlikely(!p->page))
		return -ENOMEM;
518

519 520 521
	p->daddr = dma_map_page_attrs(vm->dma,
				      p->page, 0, PAGE_SIZE,
				      PCI_DMA_BIDIRECTIONAL,
522
				      DMA_ATTR_SKIP_CPU_SYNC |
523
				      DMA_ATTR_NO_WARN);
524 525 526
	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
		vm_free_page(vm, p->page);
		return -ENOMEM;
527
	}
528 529

	return 0;
530 531
}

532
static int setup_page_dma(struct i915_address_space *vm,
533
			  struct i915_page_dma *p)
534
{
535
	return __setup_page_dma(vm, p, __GFP_HIGHMEM);
536 537
}

538
static void cleanup_page_dma(struct i915_address_space *vm,
539
			     struct i915_page_dma *p)
540
{
541 542
	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	vm_free_page(vm, p->page);
543 544
}

545
#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
546

547 548
#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
549 550
#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))
551

552 553 554
static void fill_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p,
			  const u64 val)
555
{
556
	u64 * const vaddr = kmap_atomic(p->page);
557

558
	memset64(vaddr, val, PAGE_SIZE / sizeof(val));
559

560
	kunmap_atomic(vaddr);
561 562
}

563 564 565
static void fill_page_dma_32(struct i915_address_space *vm,
			     struct i915_page_dma *p,
			     const u32 v)
566
{
567
	fill_page_dma(vm, p, (u64)v << 32 | v);
568 569
}

570
static int
571
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
572
{
573
	unsigned long size;
574

	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we will
	 * need to support a 64K scratch page, given that every 16th entry for a
	 * page-table operating in 64K mode must point to a properly aligned 64K
	 * region, including any PTEs which happen to point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert(). However, as we share the
	 * scratch (read-only) between all vm, we create one 64k scratch page
	 * for all.
	 */
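	/*
	 * Worked example (illustrative): one 64K entry covers the range of
	 * 16 consecutive 4K PTEs (16 * 4096 = 64K), so the hardware only
	 * consults every 16th slot, and whatever that slot points at,
	 * including scratch, must itself be a 64K sized and aligned region.
	 */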
	size = I915_GTT_PAGE_SIZE_4K;
	if (i915_vm_is_4lvl(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
		size = I915_GTT_PAGE_SIZE_64K;
		gfp |= __GFP_NOWARN;
	}
	gfp |= __GFP_ZERO | __GFP_RETRY_MAYFAIL;

	do {
		int order = get_order(size);
		struct page *page;
		dma_addr_t addr;
598

599
		page = alloc_pages(gfp, order);
600
		if (unlikely(!page))
601
			goto skip;
602

603 604 605
		addr = dma_map_page_attrs(vm->dma,
					  page, 0, size,
					  PCI_DMA_BIDIRECTIONAL,
606
					  DMA_ATTR_SKIP_CPU_SYNC |
607
					  DMA_ATTR_NO_WARN);
608 609
		if (unlikely(dma_mapping_error(vm->dma, addr)))
			goto free_page;
610

611 612
		if (unlikely(!IS_ALIGNED(addr, size)))
			goto unmap_page;
613

614 615
		vm->scratch_page.page = page;
		vm->scratch_page.daddr = addr;
616
		vm->scratch_order = order;
		return 0;

unmap_page:
		dma_unmap_page(vm->dma, addr, size, PCI_DMA_BIDIRECTIONAL);
free_page:
		__free_pages(page, order);
skip:
		if (size == I915_GTT_PAGE_SIZE_4K)
			return -ENOMEM;

		size = I915_GTT_PAGE_SIZE_4K;
		gfp &= ~__GFP_NOWARN;
	} while (1);
630 631
}

632
static void cleanup_scratch_page(struct i915_address_space *vm)
633
{
634
	struct i915_page_dma *p = &vm->scratch_page;
635
	int order = vm->scratch_order;
636

637
	dma_unmap_page(vm->dma, p->daddr, BIT(order) << PAGE_SHIFT,
638
		       PCI_DMA_BIDIRECTIONAL);
639
	__free_pages(p->page, order);
640 641
}

642
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
643
{
644
	struct i915_page_table *pt;
645

646
	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
647
	if (unlikely(!pt))
648 649
		return ERR_PTR(-ENOMEM);

650 651 652 653
	if (unlikely(setup_px(vm, pt))) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}
654

655
	pt->used_ptes = 0;
656 657 658
	return pt;
}

659
static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
660
{
661
	cleanup_px(vm, pt);
662 663 664 665 666 667
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
668
	fill_px(vm, pt, vm->scratch_pte);
669 670
}

671
static void gen6_initialize_pt(struct i915_address_space *vm,
672 673
			       struct i915_page_table *pt)
{
674
	fill32_px(vm, pt, vm->scratch_pte);
675 676
}

677
static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
678
{
679
	struct i915_page_directory *pd;
680

681
	pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
682
	if (unlikely(!pd))
683 684
		return ERR_PTR(-ENOMEM);

685 686 687 688
	if (unlikely(setup_px(vm, pd))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}
689

690
	pd->used_pdes = 0;
691 692 693
	return pd;
}

694
static void free_pd(struct i915_address_space *vm,
695
		    struct i915_page_directory *pd)
696
{
697 698
	cleanup_px(vm, pd);
	kfree(pd);
699 700 701 702 703
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
704 705
	fill_px(vm, pd,
		gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
706
	memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES);
707 708
}

709
static int __pdp_init(struct i915_address_space *vm,
710 711
		      struct i915_page_directory_pointer *pdp)
{
712
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
713

714
	pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
715
					    I915_GFP_ALLOW_FAIL);
716
	if (unlikely(!pdp->page_directory))
717 718
		return -ENOMEM;

719
	memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes);
720

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

730 731
static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
732 733 734 735
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

736
	GEM_BUG_ON(!i915_vm_is_4lvl(vm));
737 738 739 740 741

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

742
	ret = __pdp_init(vm, pdp);
743 744 745
	if (ret)
		goto fail_bitmap;

746
	ret = setup_px(vm, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

760
static void free_pdp(struct i915_address_space *vm,
761 762 763
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);
764

765
	if (!i915_vm_is_4lvl(vm))
766 767 768 769
		return;

	cleanup_px(vm, pdp);
	kfree(pdp);
770 771
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

779
	fill_px(vm, pdp, scratch_pdpe);
780 781 782 783 784
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
785 786
	fill_px(vm, pml4,
		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
787
	memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4);
788 789
}

790 791
/*
 * PDE TLBs are a pain to invalidate on GEN8+. When we modify
792 793 794 795 796 797
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
798
	ppgtt->pd_dirty_engines = ALL_ENGINES;
799 800
}

801 802 803
/* Removes entries from a single page table, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
804
static bool gen8_ppgtt_clear_pt(const struct i915_address_space *vm,
805
				struct i915_page_table *pt,
806
				u64 start, u64 length)
807
{
808
	unsigned int num_entries = gen8_pte_count(start, length);
809
	gen8_pte_t *vaddr;
810

811
	GEM_BUG_ON(num_entries > pt->used_ptes);

813 814 815
	pt->used_ptes -= num_entries;
	if (!pt->used_ptes)
		return true;
816

817
	vaddr = kmap_atomic_px(pt);
818
	memset64(vaddr + gen8_pte_index(start), vm->scratch_pte, num_entries);
819
	kunmap_atomic(vaddr);
820 821

	return false;
822
}
823

static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       struct i915_page_table *pt,
			       unsigned int pde)
{
	gen8_pde_t *vaddr;

	pd->page_table[pde] = pt;

	vaddr = kmap_atomic_px(pd);
	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

838
static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
839
				struct i915_page_directory *pd,
840
				u64 start, u64 length)
841 842
{
	struct i915_page_table *pt;
843
	u32 pde;
844 845

	gen8_for_each_pde(pt, pd, start, length, pde) {
846 847
		GEM_BUG_ON(pt == vm->scratch_pt);

848 849
		if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
			continue;
850

851
		gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
852
		GEM_BUG_ON(!pd->used_pdes);
853
		pd->used_pdes--;
854 855

		free_pt(vm, pt);
856 857
	}

858 859
	return !pd->used_pdes;
}
860

861 862 863 864 865 866 867 868
static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				struct i915_page_directory *pd,
				unsigned int pdpe)
{
	gen8_ppgtt_pdpe_t *vaddr;

	pdp->page_directory[pdpe] = pd;
869
	if (!i915_vm_is_4lvl(vm))
870 871 872 873 874
		return;

	vaddr = kmap_atomic_px(pdp);
	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
875
}
876

877 878 879 880
/* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries
 */
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
881
				 struct i915_page_directory_pointer *pdp,
882
				 u64 start, u64 length)
883 884
{
	struct i915_page_directory *pd;
885
	unsigned int pdpe;
886

887
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
888 889
		GEM_BUG_ON(pd == vm->scratch_pd);

890 891
		if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
			continue;
892

893
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
894
		GEM_BUG_ON(!pdp->used_pdpes);
895
		pdp->used_pdpes--;
896

897 898
		free_pd(vm, pd);
	}
899

900
	return !pdp->used_pdpes;
901
}
902

903 904 905 906 907 908
static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
				 struct i915_page_directory_pointer *pdp,
				 unsigned int pml4e)
{
	gen8_ppgtt_pml4e_t *vaddr;

	pml4->pdps[pml4e] = pdp;

	vaddr = kmap_atomic_px(pml4);
	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

922 923 924 925
/* Removes entries from a single pml4.
 * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries are always scratch pml4e.
 */
926 927
static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
928
{
929 930
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
931
	struct i915_page_directory_pointer *pdp;
932
	unsigned int pml4e;
933

934
	GEM_BUG_ON(!i915_vm_is_4lvl(vm));
935

936
	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
937 938
		GEM_BUG_ON(pdp == vm->scratch_pdp);

939 940
		if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
			continue;
941

942 943 944
		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);

		free_pdp(vm, pdp);
945 946 947
	}
}

948
static inline struct sgt_dma {
949 950
	struct scatterlist *sg;
	dma_addr_t dma, max;
951 952 953 954 955
} sgt_dma(struct i915_vma *vma) {
	struct scatterlist *sg = vma->pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);
	return (struct sgt_dma) { sg, addr, addr + sg->length };
}
956

struct gen8_insert_pte {
	u16 pml4e;
	u16 pdpe;
	u16 pde;
	u16 pte;
};

static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
{
	return (struct gen8_insert_pte) {
		 gen8_pml4e_index(start),
		 gen8_pdpe_index(start),
		 gen8_pde_index(start),
		 gen8_pte_index(start),
	};
}

974 975
static __always_inline bool
gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
976
			      struct i915_page_directory_pointer *pdp,
977
			      struct sgt_dma *iter,
978
			      struct gen8_insert_pte *idx,
979 980
			      enum i915_cache_level cache_level,
			      u32 flags)
981
{
982
	struct i915_page_directory *pd;
983
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
984 985
	gen8_pte_t *vaddr;
	bool ret;
986

987
	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
988 989
	pd = pdp->page_directory[idx->pdpe];
	vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
990
	do {
991 992
		vaddr[idx->pte] = pte_encode | iter->dma;

993
		iter->dma += I915_GTT_PAGE_SIZE;
994 995 996 997 998 999
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				ret = false;
				break;
			}
1000

1001 1002
			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}
1004

1005 1006 1007 1008 1009 1010
		if (++idx->pte == GEN8_PTES) {
			idx->pte = 0;

			if (++idx->pde == I915_PDES) {
				idx->pde = 0;

1011
				/* Limited by sg length for 3lvl */
1012 1013
				if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
					idx->pdpe = 0;
1014
					ret = true;
1015
					break;
1016 1017
				}

1018
				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->vm));
1019
				pd = pdp->page_directory[idx->pdpe];
1020
			}
1021

1022
			kunmap_atomic(vaddr);
1023
			vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
1024
		}
1025
	} while (1);
1026
	kunmap_atomic(vaddr);
1027

1028
	return ret;
1029 1030
}

1031
static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
1032
				   struct i915_vma *vma,
1033
				   enum i915_cache_level cache_level,
1034
				   u32 flags)
1035
{
1036
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1037
	struct sgt_dma iter = sgt_dma(vma);
1038
	struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1039

1040
	gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
1041
				      cache_level, flags);
1042 1043

	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1044
}
1045

1046 1047 1048
static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
					   struct i915_page_directory_pointer **pdps,
					   struct sgt_dma *iter,
1049 1050
					   enum i915_cache_level cache_level,
					   u32 flags)
1051
{
1052
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level, flags);
1053 1054 1055 1056 1057 1058 1059 1060
	u64 start = vma->node.start;
	dma_addr_t rem = iter->sg->length;

	do {
		struct gen8_insert_pte idx = gen8_insert_pte(start);
		struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
		struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
		unsigned int page_size;
1061
		bool maybe_64K = false;
		gen8_pte_t encode = pte_encode;
		gen8_pte_t *vaddr;
		u16 index, max;

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
			index = idx.pde;
			max = I915_PDES;
			page_size = I915_GTT_PAGE_SIZE_2M;

			encode |= GEN8_PDE_PS_2M;

			vaddr = kmap_atomic_px(pd);
		} else {
			struct i915_page_table *pt = pd->page_table[idx.pde];

			index = idx.pte;
			max = GEN8_PTES;
			page_size = I915_GTT_PAGE_SIZE;

1083 1084 1085 1086
			if (!index &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1087
			     rem >= (max - index) * I915_GTT_PAGE_SIZE))
1088 1089
				maybe_64K = true;

			vaddr = kmap_atomic_px(pt);
		}

		do {
			GEM_BUG_ON(iter->sg->length < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = iter->sg->length;
				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

1109 1110 1111
				if (maybe_64K && index < max &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1112
				       rem >= (max - index) * I915_GTT_PAGE_SIZE)))
1113 1114
					maybe_64K = false;

1115 1116 1117 1118 1119 1120
				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < max);

		kunmap_atomic(vaddr);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled whole page-table with 64K entries, or filled part of
		 * it and have reached the end of the sg table and we have
		 * enough padding.
		 */
		if (maybe_64K &&
		    (index == max ||
		     (i915_vm_has_scratch_64K(vma->vm) &&
		      !iter->sg && IS_ALIGNED(vma->node.start +
					      vma->node.size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = kmap_atomic_px(pd);
			vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
			kunmap_atomic(vaddr);
1137
			page_size = I915_GTT_PAGE_SIZE_64K;

			/*
			 * We write all 4K page entries, even when using 64K
			 * pages. In order to verify that the HW isn't cheating
			 * by using the 4K PTE instead of the 64K PTE, we want
			 * to remove all the surplus entries. If the HW skipped
			 * the 64K PTE, it will read/write into the scratch page
			 * instead - which we detect as missing results during
			 * selftests.
			 */
			if (I915_SELFTEST_ONLY(vma->vm->scrub_64K)) {
				u16 i;

1151
				encode = vma->vm->scratch_pte;
				vaddr = kmap_atomic_px(pd->page_table[idx.pde]);

				for (i = 1; i < index; i += 16)
					memset64(vaddr + i, encode, 15);

				kunmap_atomic(vaddr);
			}
1159
		}
1160 1161

		vma->page_sizes.gtt |= page_size;
1162 1163 1164
	} while (iter->sg);
}

1165
static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
1166
				   struct i915_vma *vma,
1167
				   enum i915_cache_level cache_level,
1168
				   u32 flags)
1169 1170
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1171
	struct sgt_dma iter = sgt_dma(vma);
1172
	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
1173

1174
	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
1175 1176
		gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level,
					       flags);
1177 1178 1179 1180
	} else {
		struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);

		while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
1181 1182
						     &iter, &idx, cache_level,
						     flags))
1183
			GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
1184 1185

		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
1186
	}
1187 1188
}

1189
static void gen8_free_page_tables(struct i915_address_space *vm,
1190
				  struct i915_page_directory *pd)
1191 1192 1193
{
	int i;

1194 1195 1196
	for (i = 0; i < I915_PDES; i++) {
		if (pd->page_table[i] != vm->scratch_pt)
			free_pt(vm, pd->page_table[i]);
1197
	}
}

1200 1201
static int gen8_init_scratch(struct i915_address_space *vm)
{
1202
	int ret;
1203

	/*
	 * If everybody agrees to not to write into the scratch page,
	 * we can reuse it for all vm, keeping contexts and processes separate.
	 */
	if (vm->has_read_only &&
	    vm->i915->kernel_context &&
	    vm->i915->kernel_context->ppgtt) {
		struct i915_address_space *clone =
			&vm->i915->kernel_context->ppgtt->vm;

		GEM_BUG_ON(!clone->has_read_only);

1216
		vm->scratch_order = clone->scratch_order;
1217 1218 1219 1220 1221 1222 1223
		vm->scratch_pte = clone->scratch_pte;
		vm->scratch_pt  = clone->scratch_pt;
		vm->scratch_pd  = clone->scratch_pd;
		vm->scratch_pdp = clone->scratch_pdp;
		return 0;
	}

1224
	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1225 1226
	if (ret)
		return ret;
1227

1228 1229 1230 1231 1232
	vm->scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr,
				I915_CACHE_LLC,
				PTE_READ_ONLY);

1233
	vm->scratch_pt = alloc_pt(vm);
1234
	if (IS_ERR(vm->scratch_pt)) {
1235 1236
		ret = PTR_ERR(vm->scratch_pt);
		goto free_scratch_page;
1237 1238
	}

1239
	vm->scratch_pd = alloc_pd(vm);
1240
	if (IS_ERR(vm->scratch_pd)) {
1241 1242
		ret = PTR_ERR(vm->scratch_pd);
		goto free_pt;
1243 1244
	}

1245
	if (i915_vm_is_4lvl(vm)) {
1246
		vm->scratch_pdp = alloc_pdp(vm);
1247
		if (IS_ERR(vm->scratch_pdp)) {
1248 1249
			ret = PTR_ERR(vm->scratch_pdp);
			goto free_pd;
1250 1251 1252
		}
	}

1253 1254
	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
1255
	if (i915_vm_is_4lvl(vm))
1256
		gen8_initialize_pdp(vm, vm->scratch_pdp);
1257 1258

	return 0;
1259 1260

free_pd:
1261
	free_pd(vm, vm->scratch_pd);
1262
free_pt:
1263
	free_pt(vm, vm->scratch_pt);
1264
free_scratch_page:
1265
	cleanup_scratch_page(vm);
1266 1267

	return ret;
1268 1269
}

1270 1271
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
1272
	struct i915_address_space *vm = &ppgtt->vm;
1273
	struct drm_i915_private *dev_priv = vm->i915;
1274 1275 1276
	enum vgt_g2v_type msg;
	int i;

1277
	if (i915_vm_is_4lvl(vm)) {
1278
		const u64 daddr = px_dma(&ppgtt->pml4);
1279

1280 1281
		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
1282 1283 1284 1285

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
1286
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1287
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
1288

1289 1290
			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

1302 1303
static void gen8_free_scratch(struct i915_address_space *vm)
{
1304 1305 1306
	if (!vm->scratch_page.daddr)
		return;

1307
	if (i915_vm_is_4lvl(vm))
1308 1309 1310 1311
		free_pdp(vm, vm->scratch_pdp);
	free_pd(vm, vm->scratch_pd);
	free_pt(vm, vm->scratch_pt);
	cleanup_scratch_page(vm);
1312 1313
}

1314
static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
1315
				    struct i915_page_directory_pointer *pdp)
1316
{
1317
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
1318 1319
	int i;

1320
	for (i = 0; i < pdpes; i++) {
1321
		if (pdp->page_directory[i] == vm->scratch_pd)
1322 1323
			continue;

1324 1325
		gen8_free_page_tables(vm, pdp->page_directory[i]);
		free_pd(vm, pdp->page_directory[i]);
1326
	}
1327

1328
	free_pdp(vm, pdp);
1329 1330 1331 1332 1333 1334
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	int i;

1335
	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
1336
		if (ppgtt->pml4.pdps[i] == ppgtt->vm.scratch_pdp)
1337 1338
			continue;

1339
		gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, ppgtt->pml4.pdps[i]);
1340 1341
	}

1342
	cleanup_px(&ppgtt->vm, &ppgtt->pml4);
1343 1344 1345 1346
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
1347
	struct drm_i915_private *dev_priv = vm->i915;
1348
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1349

1350
	if (intel_vgpu_active(dev_priv))
1351 1352
		gen8_ppgtt_notify_vgt(ppgtt, false);

1353
	if (i915_vm_is_4lvl(vm))
1354
		gen8_ppgtt_cleanup_4lvl(ppgtt);
1355
	else
1356
		gen8_ppgtt_cleanup_3lvl(&ppgtt->vm, &ppgtt->pdp);
1357

1358
	gen8_free_scratch(vm);
1359 1360
}

1361 1362 1363
static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       u64 start, u64 length)
1364
{
1365
	struct i915_page_table *pt;
1366
	u64 from = start;
1367
	unsigned int pde;
1368

1369
	gen8_for_each_pde(pt, pd, start, length, pde) {
1370 1371
		int count = gen8_pte_count(start, length);

1372
		if (pt == vm->scratch_pt) {
1373 1374
			pd->used_pdes++;

1375
			pt = alloc_pt(vm);
1376 1377
			if (IS_ERR(pt)) {
				pd->used_pdes--;
1378
				goto unwind;
1379
			}
1380

1381
			if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
1382
				gen8_initialize_pt(vm, pt);
1383 1384

			gen8_ppgtt_set_pde(vm, pd, pt, pde);
1385
			GEM_BUG_ON(pd->used_pdes > I915_PDES);
1386
		}
1387

1388
		pt->used_ptes += count;
1389
	}
1390
	return 0;
1391

1392 1393
unwind:
	gen8_ppgtt_clear_pd(vm, pd, from, start - from);
	return -ENOMEM;
1395 1396
}

1397 1398 1399
static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				u64 start, u64 length)
1400
{
1401
	struct i915_page_directory *pd;
1402 1403
	u64 from = start;
	unsigned int pdpe;
1404 1405
	int ret;

1406
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1407
		if (pd == vm->scratch_pd) {
1408 1409
			pdp->used_pdpes++;

1410
			pd = alloc_pd(vm);
1411 1412
			if (IS_ERR(pd)) {
				pdp->used_pdpes--;
1413
				goto unwind;
1414
			}
1415

1416
			gen8_initialize_pd(vm, pd);
1417
			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1418
			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
1419 1420 1421
		}

		ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
1422 1423
		if (unlikely(ret))
			goto unwind_pd;
1424
	}
1425

	return 0;
1427

unwind_pd:
	if (!pd->used_pdes) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;
		free_pd(vm, pd);
	}
1435 1436 1437
unwind:
	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
	return -ENOMEM;
1438 1439
}

1440 1441
static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
1442
{
1443 1444 1445
	return gen8_ppgtt_alloc_pdp(vm,
				    &i915_vm_to_ppgtt(vm)->pdp, start, length);
}
1446

static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	u64 from = start;
	u32 pml4e;
	int ret;
1456

1457
	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1458 1459 1460 1461
		if (pml4->pdps[pml4e] == vm->scratch_pdp) {
			pdp = alloc_pdp(vm);
			if (IS_ERR(pdp))
				goto unwind;
1462

1463 1464 1465
			gen8_initialize_pdp(vm, pdp);
			gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
		}
1466

1467
		ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
1468 1469
		if (unlikely(ret))
			goto unwind_pdp;
1470 1471 1472 1473
	}

	return 0;

unwind_pdp:
	if (!pdp->used_pdpes) {
		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
		free_pdp(vm, pdp);
	}
1479 1480 1481
unwind:
	gen8_ppgtt_clear_4lvl(vm, from, start - from);
	return -ENOMEM;
1482 1483
}

1484
static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
1485
{
1486
	struct i915_address_space *vm = &ppgtt->vm;
1487 1488
	struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
	struct i915_page_directory *pd;
1489
	u64 start = 0, length = ppgtt->vm.total;
1490 1491
	u64 from = start;
	unsigned int pdpe;
1492

1493 1494 1495 1496
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		pd = alloc_pd(vm);
		if (IS_ERR(pd))
			goto unwind;
1497

1498 1499 1500 1501
		gen8_initialize_pd(vm, pd);
		gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
		pdp->used_pdpes++;
	}
1502

1503 1504
	pdp->used_pdpes++; /* never remove */
	return 0;
1505

unwind:
	start -= from;
	gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		free_pd(vm, pd);
	}
	pdp->used_pdpes = 0;
	return -ENOMEM;
1514 1515
}

1516
/*
1517 1518 1519 1520
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
 *
1522
 */
1523
static struct i915_hw_ppgtt *gen8_ppgtt_create(struct drm_i915_private *i915)
{
	struct i915_hw_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

1532 1533
	kref_init(&ppgtt->ref);

1534 1535
	ppgtt->vm.i915 = i915;
	ppgtt->vm.dma = &i915->drm.pdev->dev;
1536
	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
1537

1538 1539
	/* From bdw, there is support for read-only pages in the PPGTT. */
	ppgtt->vm.has_read_only = true;
1540

1541
	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
1542

	/* There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter so play safe for now.
	 */
1546
	if (IS_CHERRYVIEW(i915) || IS_BROXTON(i915))
1547
		ppgtt->vm.pt_kmap_wc = true;
1548

1549 1550 1551
	err = gen8_init_scratch(&ppgtt->vm);
	if (err)
		goto err_free;
1552

1553
	if (i915_vm_is_4lvl(&ppgtt->vm)) {
1554 1555 1556
		err = setup_px(&ppgtt->vm, &ppgtt->pml4);
		if (err)
			goto err_scratch;
1557

1558
		gen8_initialize_pml4(&ppgtt->vm, &ppgtt->pml4);
1559

1560 1561 1562
		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_4lvl;
		ppgtt->vm.insert_entries = gen8_ppgtt_insert_4lvl;
		ppgtt->vm.clear_range = gen8_ppgtt_clear_4lvl;
1563
	} else {
1564 1565 1566
		err = __pdp_init(&ppgtt->vm, &ppgtt->pdp);
		if (err)
			goto err_scratch;
1567

1568 1569 1570
		if (intel_vgpu_active(i915)) {
			err = gen8_preallocate_top_level_pdp(ppgtt);
			if (err) {
1571
				__pdp_fini(&ppgtt->pdp);
1572
				goto err_scratch;
1573
			}
1574
		}
1575

1576 1577 1578
		ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc_3lvl;
		ppgtt->vm.insert_entries = gen8_ppgtt_insert_3lvl;
		ppgtt->vm.clear_range = gen8_ppgtt_clear_3lvl;
1579
	}
1580

1581
	if (intel_vgpu_active(i915))
1582 1583
		gen8_ppgtt_notify_vgt(ppgtt, true);

1584
	ppgtt->vm.cleanup = gen8_ppgtt_cleanup;
1585

1586
	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
1587 1588 1589 1590
	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
	ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
	ppgtt->vm.vma_ops.clear_pages = clear_pages;

1591
	return ppgtt;
1592

1593
err_scratch:
1594
	gen8_free_scratch(&ppgtt->vm);
1595 1596 1597
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
1598 1599
}

1600
/* Write pde (index) from the page directory @pd to the page table @pt */
1601
static inline void gen6_write_pde(const struct gen6_hw_ppgtt *ppgtt,
				  const unsigned int pde,
				  const struct i915_page_table *pt)
{
1605
	/* Caller needs to make sure the write completes if necessary */
1606 1607
	iowrite32(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
		  ppgtt->pd_addr + pde);
1608
}

1610
static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
1612
	struct intel_engine_cs *engine;
1613
	u32 ecochk, ecobits;
1614
	enum intel_engine_id id;

1616 1617
	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1618

1619
	ecochk = I915_READ(GAM_ECOCHK);
1620
	if (IS_HASWELL(dev_priv)) {
1621 1622 1623 1624 1625 1626
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);
1627

1628
	for_each_engine(engine, dev_priv, id) {
		/* GFX_MODE is per-ring on gen7+ */
1630
		I915_WRITE(RING_MODE_GEN7(engine),
1631
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
1633
}

1635
static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
1636
{
1637
	u32 ecochk, gab_ctl, ecobits;
1638

1639 1640 1641
	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

1643 1644 1645 1646 1647 1648
	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

1649 1650
	if (HAS_PPGTT(dev_priv)) /* may be disabled for VT-d */
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}

/* PPGTT support for Sandybridge/Gen6 and later */
1654
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1655
				   u64 start, u64 length)
1656
{
1657
	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1658
	unsigned int first_entry = start / I915_GTT_PAGE_SIZE;
1659 1660
	unsigned int pde = first_entry / GEN6_PTES;
	unsigned int pte = first_entry % GEN6_PTES;
1661
	unsigned int num_entries = length / I915_GTT_PAGE_SIZE;
1662
	const gen6_pte_t scratch_pte = vm->scratch_pte;
1663

1664
	while (num_entries) {
1665
		struct i915_page_table *pt = ppgtt->base.pd.page_table[pde++];
1666
		const unsigned int count = min(num_entries, GEN6_PTES - pte);
1667
		gen6_pte_t *vaddr;
1668

1669 1670 1671 1672 1673 1674 1675 1676
		GEM_BUG_ON(pt == vm->scratch_pt);

		num_entries -= count;

		GEM_BUG_ON(count > pt->used_ptes);
		pt->used_ptes -= count;
		if (!pt->used_ptes)
			ppgtt->scan_for_unused_pt = true;
1677

1678 1679
		/*
		 * Note that the hw doesn't support removing PDE on the fly
1680 1681 1682 1683
		 * (they are cached inside the context with no means to
		 * invalidate the cache), so we can only reset the PTE
		 * entries back to scratch.
		 */
1684

1685
		vaddr = kmap_atomic_px(pt);
1686
		memset32(vaddr + pte, scratch_pte, count);
1687
		kunmap_atomic(vaddr);
1688

1689
		pte = 0;
1690
	}
1691 1692
}

1693
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
1694
				      struct i915_vma *vma,
1695 1696
				      enum i915_cache_level cache_level,
				      u32 flags)
{
1698
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1699
	unsigned first_entry = vma->node.start / I915_GTT_PAGE_SIZE;
1700 1701
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned act_pte = first_entry % GEN6_PTES;
1702
	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1703
	struct sgt_dma iter = sgt_dma(vma);
1704 1705
	gen6_pte_t *vaddr;

1706 1707
	GEM_BUG_ON(ppgtt->pd.page_table[act_pt] == vm->scratch_pt);

1708
	vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
1709 1710
	do {
		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
1711

1712
		iter.dma += I915_GTT_PAGE_SIZE;
1713 1714 1715 1716
		if (iter.dma == iter.max) {
			iter.sg = __sg_next(iter.sg);
			if (!iter.sg)
				break;
1717

1718 1719 1720
			iter.dma = sg_dma_address(iter.sg);
			iter.max = iter.dma + iter.sg->length;
		}
1721

1722
		if (++act_pte == GEN6_PTES) {
1723 1724
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
1725
			act_pte = 0;
		}
1727
	} while (1);
1728
	kunmap_atomic(vaddr);
1729 1730

	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}

1733
static int gen6_alloc_va_range(struct i915_address_space *vm,
1734
			       u64 start, u64 length)
1735
{
1736
	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1737
	struct i915_page_table *pt;
1738 1739 1740
	u64 from = start;
	unsigned int pde;
	bool flush = false;
1741

1742
	gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) {
1743 1744
		const unsigned int count = gen6_pte_count(start, length);

1745 1746 1747 1748
		if (pt == vm->scratch_pt) {
			pt = alloc_pt(vm);
			if (IS_ERR(pt))
				goto unwind_out;
1749

1750
			gen6_initialize_pt(vm, pt);
1751
			ppgtt->base.pd.page_table[pde] = pt;
1752 1753 1754 1755 1756 1757

			if (i915_vma_is_bound(ppgtt->vma,
					      I915_VMA_GLOBAL_BIND)) {
				gen6_write_pde(ppgtt, pde, pt);
				flush = true;
			}
1758 1759

			GEM_BUG_ON(pt->used_ptes);
1760
		}
1761 1762

		pt->used_ptes += count;
1763 1764
	}

1765
	if (flush) {
1766 1767
		mark_tlbs_dirty(&ppgtt->base);
		gen6_ggtt_invalidate(ppgtt->base.vm.i915);
1768 1769 1770
	}

	return 0;
1771 1772

unwind_out:
1773
	gen6_ppgtt_clear_range(vm, from, start - from);
1774
	return -ENOMEM;
1775 1776
}

1777
static int gen6_ppgtt_init_scratch(struct gen6_hw_ppgtt *ppgtt)
1778
{
1779 1780 1781
	struct i915_address_space * const vm = &ppgtt->base.vm;
	struct i915_page_table *unused;
	u32 pde;
1782
	int ret;
1783

1784
	ret = setup_scratch_page(vm, __GFP_HIGHMEM);
1785 1786
	if (ret)
		return ret;
1787

1788 1789 1790
	vm->scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
					 I915_CACHE_NONE,
					 PTE_READ_ONLY);
1791

1792
	vm->scratch_pt = alloc_pt(vm);
1793
	if (IS_ERR(vm->scratch_pt)) {
1794
		cleanup_scratch_page(vm);
1795 1796 1797
		return PTR_ERR(vm->scratch_pt);
	}

1798
	gen6_initialize_pt(vm, vm->scratch_pt);
1799 1800
	gen6_for_all_pdes(unused, &ppgtt->base.pd, pde)
		ppgtt->base.pd.page_table[pde] = vm->scratch_pt;
1801 1802 1803 1804

	return 0;
}

1805
static void gen6_ppgtt_free_scratch(struct i915_address_space *vm)
1806
{
1807 1808
	free_pt(vm, vm->scratch_pt);
	cleanup_scratch_page(vm);
1809 1810
}

1811
static void gen6_ppgtt_free_pd(struct gen6_hw_ppgtt *ppgtt)
1812
{
1813
	struct i915_page_table *pt;
1814
	u32 pde;
1815

1816
	gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
1817 1818 1819 1820 1821 1822 1823
		if (pt != ppgtt->base.vm.scratch_pt)
			free_pt(&ppgtt->base.vm, pt);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
1824

1825
	i915_vma_destroy(ppgtt->vma);
1826 1827 1828

	gen6_ppgtt_free_pd(ppgtt);
	gen6_ppgtt_free_scratch(vm);
1829 1830
}

1831
static int pd_vma_set_pages(struct i915_vma *vma)
1832
{
1833 1834 1835
	vma->pages = ERR_PTR(-ENODEV);
	return 0;
}
1836

1837 1838 1839
static void pd_vma_clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);
1840

1841 1842 1843 1844 1845 1846 1847 1848 1849
	vma->pages = NULL;
}

static int pd_vma_bind(struct i915_vma *vma,
		       enum i915_cache_level cache_level,
		       u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vma->vm);
	struct gen6_hw_ppgtt *ppgtt = vma->private;
1850
	u32 ggtt_offset = i915_ggtt_offset(vma) / I915_GTT_PAGE_SIZE;
1851 1852
	struct i915_page_table *pt;
	unsigned int pde;
1853

1854 1855
	ppgtt->base.pd.base.ggtt_offset = ggtt_offset * sizeof(gen6_pte_t);
	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + ggtt_offset;
1856

1857 1858
	gen6_for_all_pdes(pt, &ppgtt->base.pd, pde)
		gen6_write_pde(ppgtt, pde, pt);
1859

1860 1861
	mark_tlbs_dirty(&ppgtt->base);
	gen6_ggtt_invalidate(ppgtt->base.vm.i915);
1862

1863
	return 0;
1864
}
1865

1866
static void pd_vma_unbind(struct i915_vma *vma)
1867
{
1868 1869 1870 1871 1872 1873 1874 1875 1876 1877 1878 1879 1880 1881 1882 1883 1884 1885
	struct gen6_hw_ppgtt *ppgtt = vma->private;
	struct i915_page_table * const scratch_pt = ppgtt->base.vm.scratch_pt;
	struct i915_page_table *pt;
	unsigned int pde;

	if (!ppgtt->scan_for_unused_pt)
		return;

	/* Free all no longer used page tables */
	gen6_for_all_pdes(pt, &ppgtt->base.pd, pde) {
		if (pt->used_ptes || pt == scratch_pt)
			continue;

		free_pt(&ppgtt->base.vm, pt);
		ppgtt->base.pd.page_table[pde] = scratch_pt;
	}

	ppgtt->scan_for_unused_pt = false;
}

static const struct i915_vma_ops pd_vma_ops = {
	.set_pages = pd_vma_set_pages,
	.clear_pages = pd_vma_clear_pages,
	.bind_vma = pd_vma_bind,
	.unbind_vma = pd_vma_unbind,
};

static struct i915_vma *pd_vma_create(struct gen6_hw_ppgtt *ppgtt, int size)
{
	struct drm_i915_private *i915 = ppgtt->base.vm.i915;
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct i915_vma *vma;

	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(size > ggtt->vm.total);

1904
	vma = i915_vma_alloc();
1905 1906 1907
	if (!vma)
		return ERR_PTR(-ENOMEM);

1908
	i915_active_init(i915, &vma->active, NULL);
1909
	INIT_ACTIVE_REQUEST(&vma->last_fence);

	vma->vm = &ggtt->vm;
	vma->ops = &pd_vma_ops;
	vma->private = ppgtt;

	vma->size = size;
	vma->fence_size = size;
	vma->flags = I915_VMA_GGTT;
	vma->ggtt_view.type = I915_GGTT_VIEW_ROTATED; /* prevent fencing */

	INIT_LIST_HEAD(&vma->obj_link);

	mutex_lock(&vma->vm->mutex);
	list_add(&vma->vm_link, &vma->vm->unbound_list);
	mutex_unlock(&vma->vm->mutex);

	return vma;
}

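/*
 * Pin the page directory into the GGTT before the ppgtt is used and unpin it
 * again once the last user is done; the pin_count shortcut avoids re-pinning
 * the vma for every context that shares it.
 *
 * Illustrative usage (not taken verbatim from any caller):
 *
 *	err = gen6_ppgtt_pin(&ppgtt->base);
 *	if (err)
 *		return err;
 *	... submit work that relies on the PD being resident ...
 *	gen6_ppgtt_unpin(&ppgtt->base);
 */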
int gen6_ppgtt_pin(struct i915_hw_ppgtt *base)
{
	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);
	int err;

	/*
	 * Workaround the limited maximum vma->pin_count and the aliasing_ppgtt
	 * which will be pinned into every active context.
	 * (When vma->pin_count becomes atomic, I expect we will naturally
	 * need a larger, unpacked, type and kill this redundancy.)
	 */
	if (ppgtt->pin_count++)
		return 0;

	/*
	 * PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	err = i915_vma_pin(ppgtt->vma,
			   0, GEN6_PD_ALIGN,
			   PIN_GLOBAL | PIN_HIGH);
	if (err)
		goto unpin;

	return 0;

unpin:
	ppgtt->pin_count = 0;
	return err;
}

void gen6_ppgtt_unpin(struct i915_hw_ppgtt *base)
{
	struct gen6_hw_ppgtt *ppgtt = to_gen6_ppgtt(base);

	GEM_BUG_ON(!ppgtt->pin_count);
	if (--ppgtt->pin_count)
		return;

	i915_vma_unpin(ppgtt->vma);
}

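/*
 * Build the gen6/gen7 ppgtt: a single page directory of 512 PDEs kept in the
 * GGTT. The address-space callbacks are filled in here, and the PD vma is
 * created last so a failure can unwind through the scratch setup.
 */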
static struct i915_hw_ppgtt *gen6_ppgtt_create(struct drm_i915_private *i915)
{
	struct i915_ggtt * const ggtt = &i915->ggtt;
	struct gen6_hw_ppgtt *ppgtt;
	int err;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	kref_init(&ppgtt->base.ref);

	ppgtt->base.vm.i915 = i915;
	ppgtt->base.vm.dma = &i915->drm.pdev->dev;
	ppgtt->base.vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);

	i915_address_space_init(&ppgtt->base.vm, VM_CLASS_PPGTT);

	ppgtt->base.vm.allocate_va_range = gen6_alloc_va_range;
	ppgtt->base.vm.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.vm.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.vm.cleanup = gen6_ppgtt_cleanup;

	ppgtt->base.vm.vma_ops.bind_vma    = ppgtt_bind_vma;
	ppgtt->base.vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
	ppgtt->base.vm.vma_ops.set_pages   = ppgtt_set_pages;
	ppgtt->base.vm.vma_ops.clear_pages = clear_pages;

	ppgtt->base.vm.pte_encode = ggtt->vm.pte_encode;

	err = gen6_ppgtt_init_scratch(ppgtt);
	if (err)
		goto err_free;

	ppgtt->vma = pd_vma_create(ppgtt, GEN6_PD_SIZE);
	if (IS_ERR(ppgtt->vma)) {
		err = PTR_ERR(ppgtt->vma);
		goto err_scratch;
	}

	return &ppgtt->base;

err_scratch:
	gen6_ppgtt_free_scratch(&ppgtt->base.vm);
err_free:
	kfree(ppgtt);
	return ERR_PTR(err);
}

static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
{
	/* This function is for gtt related workarounds. This function is
	 * called on driver load and after a GPU reset, so you can place
	 * workarounds here even if they get overwritten by GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl,icl */
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_LP(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
	else if (INTEL_GEN(dev_priv) >= 9)
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);

	/*
	 * To support 64K PTEs we need to first enable the use of the
	 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
	 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
	 * shouldn't be needed after GEN10.
	 *
	 * 64K pages were first introduced from BDW+, although technically they
	 * only *work* from gen9+. For pre-BDW we instead have the option for
	 * 32K pages, but we don't currently have any support for it in our
	 * driver.
	 */
	if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
	    INTEL_GEN(dev_priv) <= 10)
		I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
			   I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
			   GAMW_ECO_ENABLE_64K_IPS_FIELD);
}

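/*
 * Hardware setup shared by all ppgtt flavours: reapply the GTT workarounds
 * and, for gen6/gen7, run the generation-specific enable helpers. Later
 * generations need nothing extra at this point.
 */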
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
	gtt_write_workarounds(dev_priv);

	if (IS_GEN(dev_priv, 6))
		gen6_ppgtt_enable(dev_priv);
	else if (IS_GEN(dev_priv, 7))
		gen7_ppgtt_enable(dev_priv);

	return 0;
}

static struct i915_hw_ppgtt *
__hw_ppgtt_create(struct drm_i915_private *i915)
{
	if (INTEL_GEN(i915) < 8)
		return gen6_ppgtt_create(i915);
	else
		return gen8_ppgtt_create(i915);
}

struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_i915_private *i915,
		  struct drm_i915_file_private *fpriv)
{
	struct i915_hw_ppgtt *ppgtt;

	ppgtt = __hw_ppgtt_create(i915);
	if (IS_ERR(ppgtt))
		return ppgtt;

	ppgtt->vm.file = fpriv;

	trace_i915_ppgtt_create(&ppgtt->vm);

	return ppgtt;
}

void i915_ppgtt_close(struct i915_address_space *vm)
{
	GEM_BUG_ON(vm->closed);
	vm->closed = true;
}

static void ppgtt_destroy_vma(struct i915_address_space *vm)
{
	struct list_head *phases[] = {
		&vm->bound_list,
		&vm->unbound_list,
		NULL,
	}, **phase;

	vm->closed = true;
	for (phase = phases; *phase; phase++) {
		struct i915_vma *vma, *vn;

		list_for_each_entry_safe(vma, vn, *phase, vm_link)
			i915_vma_destroy(vma);
	}
}

void i915_ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	trace_i915_ppgtt_release(&ppgtt->vm);

	ppgtt_destroy_vma(&ppgtt->vm);

	GEM_BUG_ON(!list_empty(&ppgtt->vm.bound_list));
	GEM_BUG_ON(!list_empty(&ppgtt->vm.unbound_list));

	ppgtt->vm.cleanup(&ppgtt->vm);
	i915_address_space_fini(&ppgtt->vm);
	kfree(ppgtt);
}

/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *dev_priv)
{
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	return IS_GEN(dev_priv, 5) && IS_MOBILE(dev_priv) && intel_vtd_active();
}

static void gen6_check_faults(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	u32 fault;

	for_each_engine(engine, dev_priv, id) {
		fault = I915_READ(RING_FAULT_REG(engine));
		if (fault & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault & PAGE_MASK,
					 fault & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault),
					 RING_FAULT_FAULT_TYPE(fault));
		}
	}
}

static void gen8_check_faults(struct drm_i915_private *dev_priv)
{
	u32 fault = I915_READ(GEN8_RING_FAULT_REG);

	if (fault & RING_FAULT_VALID) {
		u32 fault_data0, fault_data1;
		u64 fault_addr;

		fault_data0 = I915_READ(GEN8_FAULT_TLB_DATA0);
		fault_data1 = I915_READ(GEN8_FAULT_TLB_DATA1);
		fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
			     ((u64)fault_data0 << 12);

		DRM_DEBUG_DRIVER("Unexpected fault\n"
				 "\tAddr: 0x%08x_%08x\n"
				 "\tAddress space: %s\n"
				 "\tEngine ID: %d\n"
				 "\tSource ID: %d\n"
				 "\tType: %d\n",
				 upper_32_bits(fault_addr),
				 lower_32_bits(fault_addr),
				 fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
				 GEN8_RING_FAULT_ENGINE_ID(fault),
				 RING_FAULT_SRCID(fault),
				 RING_FAULT_FAULT_TYPE(fault));
	}
}

void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
	/* From GEN8 onwards we only have one 'All Engine Fault Register' */
	if (INTEL_GEN(dev_priv) >= 8)
		gen8_check_faults(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 6)
		gen6_check_faults(dev_priv);
	else
		return;

	i915_clear_error_registers(dev_priv);
}

void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	/* Don't bother messing with faults pre GEN6 as we have little
	 * documentation supporting that it's a good idea.
	 */
	if (INTEL_GEN(dev_priv) < 6)
		return;

	i915_check_and_clear_faults(dev_priv);

	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);

	i915_ggtt_invalidate(dev_priv);
}

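/*
 * Map an object's backing pages for DMA. If the IOMMU/swiotlb mapping space
 * is exhausted, progressively shrink other bound/unbound objects and retry
 * until the mapping succeeds or the shrinker runs out of pages to reclaim.
 */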
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	do {
		if (dma_map_sg_attrs(&obj->base.dev->pdev->dev,
				     pages->sgl, pages->nents,
				     PCI_DMA_BIDIRECTIONAL,
				     DMA_ATTR_NO_WARN))
			return 0;

		/*
		 * If the DMA remap fails, one cause can be that we have
		 * too many objects pinned in a small remapping table,
		 * such as swiotlb. Incrementally purge all other objects and
		 * try again - if there are no more pages to remove from
		 * the DMA remapper, i915_gem_shrink will return 0.
		 */
		GEM_BUG_ON(obj->mm.pages == pages);
	} while (i915_gem_shrink(to_i915(obj->base.dev),
				 obj->base.size >> PAGE_SHIFT, NULL,
				 I915_SHRINK_BOUND |
				 I915_SHRINK_UNBOUND));

	return -ENOSPC;
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));

	ggtt->invalidate(vm->i915);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct sgt_iter sgt_iter;
	gen8_pte_t __iomem *gtt_entries;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
	dma_addr_t addr;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
	gtt_entries += vma->node.start / I915_GTT_PAGE_SIZE;
	for_each_sgt_dma(addr, sgt_iter, vma->pages)
		gen8_set_pte(gtt_entries++, pte_encode | addr);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(vm->i915);
}

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(vm->i915);
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level level,
				     u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
	unsigned int i = vma->node.start / I915_GTT_PAGE_SIZE;
	struct sgt_iter iter;
	dma_addr_t addr;

	for_each_sgt_dma(addr, iter, vma->pages)
		iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);

	/*
	 * We want to flush the TLBs only after we're certain all the PTE
	 * updates have finished.
	 */
	ggtt->invalidate(vm->i915);
}

static void nop_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
	const gen8_pte_t scratch_pte = vm->scratch_pte;
	gen8_pte_t __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
}

static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;

	/*
	 * Make sure the internal GAM fifo has been cleared of all GTT
	 * writes before exiting stop_machine(). This guarantees that
	 * any aperture accesses waiting to start in another process
	 * cannot back up behind the GTT writes causing a hang.
	 * The register can be any arbitrary GAM register.
	 */
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

struct insert_page {
	struct i915_address_space *vm;
	dma_addr_t addr;
	u64 offset;
	enum i915_cache_level level;
};

static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
{
	struct insert_page *arg = _arg;

	gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
					  dma_addr_t addr,
					  u64 offset,
					  enum i915_cache_level level,
					  u32 unused)
{
	struct insert_page arg = { vm, addr, offset, level };

	stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
}

struct insert_entries {
	struct i915_address_space *vm;
	struct i915_vma *vma;
	enum i915_cache_level level;
	u32 flags;
};

static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;

	gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, arg->flags);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					     struct i915_vma *vma,
					     enum i915_cache_level level,
					     u32 flags)
{
	struct insert_entries arg = { vm, vma, level, flags };

	stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}

struct clear_range {
	struct i915_address_space *vm;
	u64 start;
	u64 length;
};

static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
{
	struct clear_range *arg = _arg;

	gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
	bxt_vtd_ggtt_wa(arg->vm);

	return 0;
}

static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
					  u64 start,
					  u64 length)
{
	struct clear_range arg = { vm, start, length };

	stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start / I915_GTT_PAGE_SIZE;
	unsigned num_entries = length / I915_GTT_PAGE_SIZE;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->scratch_pte;

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
}

static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma *vma,
				     enum i915_cache_level cache_level,
				     u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
				    flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}

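/*
 * vma_ops used for plain GGTT-only configurations: binding writes the PTEs
 * under a runtime-pm wakeref, and the read-only bit is honoured only where
 * the hardware supports it (VLV today).
 */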
static int ggtt_bind_vma(struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	intel_wakeref_t wakeref;
	u32 pte_flags;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(obj))
		pte_flags |= PTE_READ_ONLY;

	with_intel_runtime_pm(i915, wakeref)
		vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	return 0;
}

static void ggtt_unbind_vma(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(i915, wakeref)
		vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

static int aliasing_gtt_bind_vma(struct i915_vma *vma,
				 enum i915_cache_level cache_level,
				 u32 flags)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	u32 pte_flags;
	int ret;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;

		if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
			ret = appgtt->vm.allocate_va_range(&appgtt->vm,
							   vma->node.start,
							   vma->size);
			if (ret)
				return ret;
		}

		appgtt->vm.insert_entries(&appgtt->vm, vma, cache_level,
					  pte_flags);
	}

	if (flags & I915_VMA_GLOBAL_BIND) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(i915, wakeref) {
			vma->vm->insert_entries(vma->vm, vma,
						cache_level, pte_flags);
		}
	}

	return 0;
}

static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;

	if (vma->flags & I915_VMA_GLOBAL_BIND) {
		struct i915_address_space *vm = vma->vm;
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(i915, wakeref)
			vm->clear_range(vm, vma->node.start, vma->size);
	}

	if (vma->flags & I915_VMA_LOCAL_BIND) {
		struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->vm;

		vm->clear_range(vm, vma->node.start, vma->size);
	}
}

void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct device *kdev = &dev_priv->drm.pdev->dev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (unlikely(ggtt->do_idle_maps)) {
		if (i915_gem_wait_for_idle(dev_priv, 0, MAX_SCHEDULE_TIMEOUT)) {
			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}

static int ggtt_set_pages(struct i915_vma *vma)
{
	int ret;

	GEM_BUG_ON(vma->pages);

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

static void i915_gtt_color_adjust(const struct drm_mm_node *node,
				  unsigned long color,
				  u64 *start,
				  u64 *end)
{
	if (node->allocated && node->color != color)
		*start += I915_GTT_PAGE_SIZE;

	/* Also leave a space between the unallocated reserved node after the
	 * GTT and any objects within the GTT, i.e. we use the color adjustment
	 * to insert a guard page to prevent prefetches crossing over the
	 * GTT boundary.
	 */
	node = list_next_entry(node, node_list);
	if (node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}

int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct i915_hw_ppgtt *ppgtt;
	int err;

2665
	ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM));
2666 2667
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);
2668

2669
	if (GEM_WARN_ON(ppgtt->vm.total < ggtt->vm.total)) {
2670 2671 2672 2673
		err = -ENODEV;
		goto err_ppgtt;
	}

	/*
	 * Note we only pre-allocate as far as the end of the global
	 * GTT. On 48b / 4-level page-tables, the difference is very,
	 * very significant! We have to preallocate as GVT/vgpu does
	 * not like the page directory disappearing.
	 */
	err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, ggtt->vm.total);
	if (err)
		goto err_ppgtt;
2683 2684

	i915->mm.aliasing_ppgtt = ppgtt;
2685

2686 2687
	GEM_BUG_ON(ggtt->vm.vma_ops.bind_vma != ggtt_bind_vma);
	ggtt->vm.vma_ops.bind_vma = aliasing_gtt_bind_vma;
2688

2689 2690
	GEM_BUG_ON(ggtt->vm.vma_ops.unbind_vma != ggtt_unbind_vma);
	ggtt->vm.vma_ops.unbind_vma = aliasing_gtt_unbind_vma;
2691

2692 2693 2694
	return 0;

err_ppgtt:
2695
	i915_ppgtt_put(ppgtt);
	return err;
}

void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
{
	struct i915_ggtt *ggtt = &i915->ggtt;
	struct i915_hw_ppgtt *ppgtt;

	ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
	if (!ppgtt)
		return;

2708
	i915_ppgtt_put(ppgtt);
2709

2710 2711
	ggtt->vm.vma_ops.bind_vma   = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = ggtt_unbind_vma;
2712 2713
}

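/*
 * Carve up the GGTT for GEM: balloon out any ranges claimed by a hypervisor,
 * reserve a mappable slot for error capture, clear the remaining holes and,
 * when only aliasing ppgtt is available, preallocate its page tables across
 * the whole global GTT.
 */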
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
2715
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
2725
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2726
	unsigned long hole_start, hole_end;
2727
	struct drm_mm_node *entry;
2728
	int ret;
2729

	/*
	 * GuC requires all resources that we're sharing with it to be placed in
	 * non-WOPCM memory. If GuC is not present or not in use we still need a
	 * small bias as ring wraparound at offset 0 sometimes hangs. No idea
	 * why.
	 */
	ggtt->pin_bias = max_t(u32, I915_GTT_PAGE_SIZE,
			       intel_guc_reserved_gtt_size(&dev_priv->guc));

2739 2740 2741
	ret = intel_vgt_balloon(dev_priv);
	if (ret)
		return ret;
2742

2743
	/* Reserve a mappable slot for our lockless error capture */
2744
	ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, &ggtt->error_capture,
2745 2746 2747
					  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
2748 2749 2750
	if (ret)
		return ret;

2751
	/* Clear any non-preallocated blocks */
2752
	drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
2753 2754
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
2755 2756
		ggtt->vm.clear_range(&ggtt->vm, hole_start,
				     hole_end - hole_start);
2757 2758 2759
	}

	/* And finally clear the reserved guard page */
2760
	ggtt->vm.clear_range(&ggtt->vm, ggtt->vm.total - PAGE_SIZE, PAGE_SIZE);
2761

2762
	if (INTEL_PPGTT(dev_priv) == INTEL_PPGTT_ALIASING) {
2763
		ret = i915_gem_init_aliasing_ppgtt(dev_priv);
2764
		if (ret)
2765
			goto err;
2766 2767
	}

2768
	return 0;
2769 2770 2771 2772

err:
	drm_mm_remove_node(&ggtt->error_capture);
	return ret;
2773 2774
}

2775 2776
/**
 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
2777
 * @dev_priv: i915 device
2778
 */
2779
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
2780
{
2781
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2782
	struct i915_vma *vma, *vn;
2783
	struct pagevec *pvec;
2784

2785
	ggtt->vm.closed = true;
2786 2787

	mutex_lock(&dev_priv->drm.struct_mutex);
2788 2789
	i915_gem_fini_aliasing_ppgtt(dev_priv);

2790
	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link)
2791
		WARN_ON(i915_vma_unbind(vma));
2792

2793 2794 2795
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);

2796
	if (drm_mm_initialized(&ggtt->vm.mm)) {
2797
		intel_vgt_deballoon(dev_priv);
2798
		i915_address_space_fini(&ggtt->vm);
2799 2800
	}

2801
	ggtt->vm.cleanup(&ggtt->vm);
2802

2803
	pvec = &dev_priv->mm.wc_stash.pvec;
2804 2805 2806 2807 2808
	if (pvec->nr) {
		set_pages_array_wb(pvec->pages, pvec->nr);
		__pagevec_release(pvec);
	}

2809
	mutex_unlock(&dev_priv->drm.struct_mutex);
2810 2811

	arch_phys_wc_del(ggtt->mtrr);
2812
	io_mapping_fini(&ggtt->iomap);
2813

2814
	i915_gem_cleanup_stolen(dev_priv);
2815
}
2816

2817
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2818 2819 2820 2821 2822 2823
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

2824
static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2825 2826 2827 2828 2829
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2830 2831

#ifdef CONFIG_X86_32
2832
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * I915_GTT_PAGE_SIZE */
2833 2834 2835 2836
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

2837 2838 2839
	return bdw_gmch_ctl << 20;
}

2840
static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

2851
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
2853
	struct drm_i915_private *dev_priv = ggtt->vm.i915;
2854
	struct pci_dev *pdev = dev_priv->drm.pdev;
2855
	phys_addr_t phys_addr;
2856
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
2859
	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;

	/*
2862 2863 2864
	 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
	 * will be dropped. For WC mappings in general we have 64 byte burst
	 * writes when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
2868
	if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
2869
		ggtt->gsm = ioremap_nocache(phys_addr, size);
	else
2871
		ggtt->gsm = ioremap_wc(phys_addr, size);
2872
	if (!ggtt->gsm) {
2873
		DRM_ERROR("Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

2877
	ret = setup_scratch_page(&ggtt->vm, GFP_DMA32);
2878
	if (ret) {
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
2881
		iounmap(ggtt->gsm);
2882
		return ret;
	}

2885 2886 2887 2888
	ggtt->vm.scratch_pte =
		ggtt->vm.pte_encode(ggtt->vm.scratch_page.daddr,
				    I915_CACHE_NONE, 0);

2889
	return 0;
}

2892 2893
static struct intel_ppat_entry *
__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
{
	struct intel_ppat_entry *entry = &ppat->entries[index];

	GEM_BUG_ON(index >= ppat->max_entries);
	GEM_BUG_ON(test_bit(index, ppat->used));

	entry->ppat = ppat;
	entry->value = value;
	kref_init(&entry->ref);
	set_bit(index, ppat->used);
	set_bit(index, ppat->dirty);

	return entry;
}

static void __free_ppat_entry(struct intel_ppat_entry *entry)
{
	struct intel_ppat *ppat = entry->ppat;
	unsigned int index = entry - ppat->entries;

	GEM_BUG_ON(index >= ppat->max_entries);
	GEM_BUG_ON(!test_bit(index, ppat->used));

	entry->value = ppat->clear_value;
	clear_bit(index, ppat->used);
	set_bit(index, ppat->dirty);
}

/**
 * intel_ppat_get - get a usable PPAT entry
 * @i915: i915 device instance
 * @value: the PPAT value required by the caller
 *
 * The function tries to search if there is an existing PPAT entry which
 * matches with the required value. If perfectly matched, the existing PPAT
 * entry will be used. If only partially matched, it will try to check if
 * there is any available PPAT index. If yes, it will allocate a new PPAT
 * index for the required entry and update the HW. If not, the partially
 * matched entry will be used.
 */
const struct intel_ppat_entry *
intel_ppat_get(struct drm_i915_private *i915, u8 value)
{
	struct intel_ppat *ppat = &i915->ppat;
2938
	struct intel_ppat_entry *entry = NULL;
	unsigned int scanned, best_score;
	int i;

	GEM_BUG_ON(!ppat->max_entries);

	scanned = best_score = 0;
	for_each_set_bit(i, ppat->used, ppat->max_entries) {
		unsigned int score;

		score = ppat->match(ppat->entries[i].value, value);
		if (score > best_score) {
			entry = &ppat->entries[i];
			if (score == INTEL_PPAT_PERFECT_MATCH) {
				kref_get(&entry->ref);
				return entry;
			}
			best_score = score;
		}
		scanned++;
	}

	if (scanned == ppat->max_entries) {
2961
		if (!entry)
			return ERR_PTR(-ENOSPC);

		kref_get(&entry->ref);
		return entry;
	}

	i = find_first_zero_bit(ppat->used, ppat->max_entries);
	entry = __alloc_ppat_entry(ppat, i, value);
	ppat->update_hw(i915);
	return entry;
}

static void release_ppat(struct kref *kref)
{
	struct intel_ppat_entry *entry =
		container_of(kref, struct intel_ppat_entry, ref);
	struct drm_i915_private *i915 = entry->ppat->i915;

	__free_ppat_entry(entry);
	entry->ppat->update_hw(i915);
}

/**
 * intel_ppat_put - put back the PPAT entry got from intel_ppat_get()
 * @entry: an intel PPAT entry
 *
 * Put back the PPAT entry got from intel_ppat_get(). If the PPAT index of the
 * entry is dynamically allocated, its reference count will be decreased. Once
 * the reference count becomes into zero, the PPAT index becomes free again.
 */
void intel_ppat_put(const struct intel_ppat_entry *entry)
{
	struct intel_ppat *ppat = entry->ppat;
	unsigned int index = entry - ppat->entries;

	GEM_BUG_ON(!ppat->max_entries);

	kref_put(&ppat->entries[index].ref, release_ppat);
}

static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
{
	struct intel_ppat *ppat = &dev_priv->ppat;
	int i;

	for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
		I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
		clear_bit(i, ppat->dirty);
	}
}

static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
{
	struct intel_ppat *ppat = &dev_priv->ppat;
	u64 pat = 0;
	int i;

	for (i = 0; i < ppat->max_entries; i++)
		pat |= GEN8_PPAT(i, ppat->entries[i].value);

	bitmap_clear(ppat->dirty, 0, ppat->max_entries);

	I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
	I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
}

static unsigned int bdw_private_pat_match(u8 src, u8 dst)
{
	unsigned int score = 0;
	enum {
		AGE_MATCH = BIT(0),
		TC_MATCH = BIT(1),
		CA_MATCH = BIT(2),
	};

	/* Cache attribute has to be matched. */
3038
	if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
		return 0;

	score |= CA_MATCH;

	if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
		score |= TC_MATCH;

	if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
		score |= AGE_MATCH;

	if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
		return INTEL_PPAT_PERFECT_MATCH;

	return score;
}

static unsigned int chv_private_pat_match(u8 src, u8 dst)
{
	return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
		INTEL_PPAT_PERFECT_MATCH : 0;
}

static void cnl_setup_private_ppat(struct intel_ppat *ppat)
{
	ppat->max_entries = 8;
	ppat->update_hw = cnl_private_pat_update_hw;
	ppat->match = bdw_private_pat_match;
	ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);

	__alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
	__alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
	__alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
	__alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
	__alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
	__alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
	__alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
	__alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}

/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases. */
3081
static void bdw_setup_private_ppat(struct intel_ppat *ppat)
{
3083 3084 3085 3086
	ppat->max_entries = 8;
	ppat->update_hw = bdw_private_pat_update_hw;
	ppat->match = bdw_private_pat_match;
	ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);

3088
	if (!HAS_PPGTT(ppat->i915)) {
		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
		 * so RTL will always use the value corresponding to
		 * pat_sel = 000".
		 * So let's disable cache for GGTT to avoid screen corruptions.
		 * MOCS still can be used though.
		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
		 * before this patch, i.e. the same uncached + snooping access
		 * like on gen6/7 seems to be in effect.
		 * - So this just fixes blitter/render access. Again it looks
		 * like it's not just uncached access, but uncached + snooping.
		 * So we can still hold onto all our assumptions wrt cpu
		 * clflushing on LLC machines.
		 */
3102 3103 3104
		__alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
		return;
	}
3105

3106 3107 3108 3109 3110 3111 3112 3113
	__alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);      /* for normal objects, no eLLC */
	__alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);  /* for something pointing to ptes? */
	__alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);  /* for scanout with eLLC */
	__alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);                      /* Uncached objects, mostly for scanout */
	__alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
	__alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
	__alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
	__alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
}

3116
static void chv_setup_private_ppat(struct intel_ppat *ppat)
3117
{
3118 3119 3120 3121
	ppat->max_entries = 8;
	ppat->update_hw = bdw_private_pat_update_hw;
	ppat->match = chv_private_pat_match;
	ppat->clear_value = CHV_PPAT_SNOOP;
3122 3123 3124 3125 3126 3127 3128

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
3140 3141
	 */

3142 3143 3144 3145 3146 3147 3148 3149
	__alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
	__alloc_ppat_entry(ppat, 1, 0);
	__alloc_ppat_entry(ppat, 2, 0);
	__alloc_ppat_entry(ppat, 3, 0);
	__alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
	__alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
	__alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
	__alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
3150 3151
}

3152 3153 3154 3155 3156
static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
3157
	cleanup_scratch_page(vm);
3158 3159
}

3160 3161
static void setup_private_pat(struct drm_i915_private *dev_priv)
{
3162 3163 3164 3165 3166
	struct intel_ppat *ppat = &dev_priv->ppat;
	int i;

	ppat->i915 = dev_priv;

3167
	if (INTEL_GEN(dev_priv) >= 10)
3168
		cnl_setup_private_ppat(ppat);
3169
	else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
3170
		chv_setup_private_ppat(ppat);
3171
	else
		bdw_setup_private_ppat(ppat);

	GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);

	for_each_clear_bit(i, ppat->used, ppat->max_entries) {
		ppat->entries[i].value = ppat->clear_value;
		ppat->entries[i].ppat = ppat;
		set_bit(i, ppat->dirty);
	}

	ppat->update_hw(dev_priv);
3183 3184
}

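/*
 * Probe the gen8+ GGTT: size it from the GMCH control register, pick the PTE
 * insert/clear callbacks (including the stop_machine()-based VT-d
 * workarounds where needed) and set up the private PAT before mapping the
 * GTT page table via ggtt_probe_common().
 */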
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
3187
	struct drm_i915_private *dev_priv = ggtt->vm.i915;
3188
	struct pci_dev *pdev = dev_priv->drm.pdev;
3189
	unsigned int size;
	u16 snb_gmch_ctl;
3191
	int err;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
3194 3195 3196 3197
	ggtt->gmadr =
		(struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
						 pci_resource_len(pdev, 2));
	ggtt->mappable_end = resource_size(&ggtt->gmadr);

3199 3200 3201 3202 3203
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
	if (err)
		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);

3205
	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3206
	if (IS_CHERRYVIEW(dev_priv))
3207
		size = chv_get_total_gtt_size(snb_gmch_ctl);
3208
	else
3209
		size = gen8_get_total_gtt_size(snb_gmch_ctl);

3211
	ggtt->vm.total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
3212 3213 3214
	ggtt->vm.cleanup = gen6_gmch_remove;
	ggtt->vm.insert_page = gen8_ggtt_insert_page;
	ggtt->vm.clear_range = nop_clear_range;
3215
	if (intel_scanout_needs_vtd_wa(dev_priv))
3216
		ggtt->vm.clear_range = gen8_ggtt_clear_range;
3217

3218
	ggtt->vm.insert_entries = gen8_ggtt_insert_entries;
3219

3220
	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
3221 3222
	if (intel_ggtt_update_needs_vtd_wa(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv) /* fails with concurrent use/update */) {
3223 3224 3225 3226
		ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->vm.insert_page    = bxt_vtd_ggtt_insert_page__BKL;
		if (ggtt->vm.clear_range != nop_clear_range)
			ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3227 3228 3229 3230 3231

		/* Prevent recursively calling stop_machine() and deadlocks. */
		dev_info(dev_priv->drm.dev,
			 "Disabling error capture for VT-d workaround\n");
		i915_disable_error_state(dev_priv, -ENODEV);
3232 3233
	}

3234 3235
	ggtt->invalidate = gen6_ggtt_invalidate;

3236 3237 3238 3239 3240
	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

3241 3242
	ggtt->vm.pte_encode = gen8_pte_encode;

3243 3244
	setup_private_pat(dev_priv);

3245
	return ggtt_probe_common(ggtt, size);
}

3248
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
3249
{
3250
	struct drm_i915_private *dev_priv = ggtt->vm.i915;
3251
	struct pci_dev *pdev = dev_priv->drm.pdev;
3252
	unsigned int size;
3253
	u16 snb_gmch_ctl;
3254
	int err;
3255

3256 3257 3258 3259
	ggtt->gmadr =
		(struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
						 pci_resource_len(pdev, 2));
	ggtt->mappable_end = resource_size(&ggtt->gmadr);
3260

3261 3262
	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
3263
	 */
3264
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
3265
		DRM_ERROR("Unknown GMADR size (%pa)\n", &ggtt->mappable_end);
3266
		return -ENXIO;
3267 3268
	}

3269 3270 3271 3272 3273
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err)
		DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
3274
	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3275

3276
	size = gen6_get_total_gtt_size(snb_gmch_ctl);
3277
	ggtt->vm.total = (size / sizeof(gen6_pte_t)) * I915_GTT_PAGE_SIZE;
3278

3279 3280 3281 3282
	ggtt->vm.clear_range = gen6_ggtt_clear_range;
	ggtt->vm.insert_page = gen6_ggtt_insert_page;
	ggtt->vm.insert_entries = gen6_ggtt_insert_entries;
	ggtt->vm.cleanup = gen6_gmch_remove;
3283

3284 3285
	ggtt->invalidate = gen6_ggtt_invalidate;

3286
	if (HAS_EDRAM(dev_priv))
3287
		ggtt->vm.pte_encode = iris_pte_encode;
3288
	else if (IS_HASWELL(dev_priv))
3289
		ggtt->vm.pte_encode = hsw_pte_encode;
3290
	else if (IS_VALLEYVIEW(dev_priv))
3291
		ggtt->vm.pte_encode = byt_pte_encode;
3292
	else if (INTEL_GEN(dev_priv) >= 7)
3293
		ggtt->vm.pte_encode = ivb_pte_encode;
3294
	else
3295
		ggtt->vm.pte_encode = snb_pte_encode;
3296

3297 3298 3299 3300 3301
	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

3302
	return ggtt_probe_common(ggtt, size);
3303 3304
}

3305
static void i915_gmch_remove(struct i915_address_space *vm)
3306
{
3307
	intel_gmch_remove();
3308
}
3309

3310
static int i915_gmch_probe(struct i915_ggtt *ggtt)
3311
{
3312
	struct drm_i915_private *dev_priv = ggtt->vm.i915;
3313
	phys_addr_t gmadr_base;
3314 3315
	int ret;

3316
	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
3317 3318 3319 3320 3321
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

3322
	intel_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);
3323

3324 3325 3326 3327
	ggtt->gmadr =
		(struct resource) DEFINE_RES_MEM(gmadr_base,
						 ggtt->mappable_end);

3328
	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
3329 3330 3331 3332
	ggtt->vm.insert_page = i915_ggtt_insert_page;
	ggtt->vm.insert_entries = i915_ggtt_insert_entries;
	ggtt->vm.clear_range = i915_ggtt_clear_range;
	ggtt->vm.cleanup = i915_gmch_remove;
3333

3334 3335
	ggtt->invalidate = gmch_ggtt_invalidate;

3336 3337 3338 3339 3340
	ggtt->vm.vma_ops.bind_vma    = ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma  = ggtt_unbind_vma;
	ggtt->vm.vma_ops.set_pages   = ggtt_set_pages;
	ggtt->vm.vma_ops.clear_pages = clear_pages;

3341
	if (unlikely(ggtt->do_idle_maps))
3342 3343
		DRM_INFO("applying Ironlake quirks for intel_iommu\n");

3344 3345 3346
	return 0;
}

3347
/**
3348
 * i915_ggtt_probe_hw - Probe GGTT hardware location
3349
 * @dev_priv: i915 device
3350
 */
3351
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
3352
{
3353
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
3354 3355
	int ret;

3356 3357
	ggtt->vm.i915 = dev_priv;
	ggtt->vm.dma = &dev_priv->drm.pdev->dev;
3358

3359 3360 3361 3362 3363 3364
	if (INTEL_GEN(dev_priv) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (INTEL_GEN(dev_priv) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
3365
	if (ret)
3366 3367
		return ret;

3368 3369 3370 3371 3372
	/* Trim the GGTT to fit the GuC mappable upper range (when enabled).
	 * This is easier than doing range restriction on the fly, as we
	 * currently don't have any bits spare to pass in this upper
	 * restriction!
	 */
3373
	if (USES_GUC(dev_priv)) {
3374 3375 3376
		ggtt->vm.total = min_t(u64, ggtt->vm.total, GUC_GGTT_TOP);
		ggtt->mappable_end =
			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
3377 3378
	}

3379
	if ((ggtt->vm.total - 1) >> 32) {
3380
		DRM_ERROR("We never expected a Global GTT with more than 32bits"
3381
			  " of address space! Found %lldM!\n",
3382 3383 3384 3385
			  ggtt->vm.total >> 20);
		ggtt->vm.total = 1ULL << 32;
		ggtt->mappable_end =
			min_t(u64, ggtt->mappable_end, ggtt->vm.total);
3386 3387
	}

3388
	if (ggtt->mappable_end > ggtt->vm.total) {
3389
		DRM_ERROR("mappable aperture extends past end of GGTT,"
3390
			  " aperture=%pa, total=%llx\n",
3391 3392
			  &ggtt->mappable_end, ggtt->vm.total);
		ggtt->mappable_end = ggtt->vm.total;
3393 3394
	}

3395
	/* GMADR is the PCI mmio aperture into the global GTT. */
3396
	DRM_DEBUG_DRIVER("GGTT size = %lluM\n", ggtt->vm.total >> 20);
3397
	DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
3398
	DRM_DEBUG_DRIVER("DSM size = %lluM\n",
3399
			 (u64)resource_size(&intel_graphics_stolen_res) >> 20);
3400
	if (intel_vtd_active())
3401
		DRM_INFO("VT-d active for gfx access\n");
3402 3403

	return 0;
3404 3405 3406 3407
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
3408
 * @dev_priv: i915 device
3409
 */
3410
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3411 3412 3413 3414
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

3415 3416
	stash_init(&dev_priv->mm.wc_stash);

3417 3418 3419 3420
	/* Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
3421
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
3423
	i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
3424

3425 3426
	ggtt->vm.is_ggtt = true;

3427 3428 3429
	/* Only VLV supports read-only GGTT mappings */
	ggtt->vm.has_read_only = IS_VALLEYVIEW(dev_priv);

3430
	if (!HAS_LLC(dev_priv) && !HAS_PPGTT(dev_priv))
3431
		ggtt->vm.mm.color_adjust = i915_gtt_color_adjust;
	mutex_unlock(&dev_priv->drm.struct_mutex);
3433

3434 3435
	if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
				dev_priv->ggtt.gmadr.start,
3436
				dev_priv->ggtt.mappable_end)) {
3437 3438 3439 3440
		ret = -EIO;
		goto out_gtt_cleanup;
	}

3441
	ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);
3442

3443 3444 3445 3446
	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
3447
	ret = i915_gem_init_stolen(dev_priv);
3448 3449 3450 3451
	if (ret)
		goto out_gtt_cleanup;

	return 0;
3452 3453

out_gtt_cleanup:
3454
	ggtt->vm.cleanup(&ggtt->vm);
3455
	return ret;
3456
}
3457

3458
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
3459
{
3460
	if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
3461 3462 3463 3464 3465
		return -EIO;

	return 0;
}

3466 3467
void i915_ggtt_enable_guc(struct drm_i915_private *i915)
{
3468 3469
	GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);

3470
	i915->ggtt.invalidate = guc_ggtt_invalidate;
3471 3472

	i915_ggtt_invalidate(i915);
3473 3474 3475 3476
}

void i915_ggtt_disable_guc(struct drm_i915_private *i915)
{
3477 3478 3479 3480
	/* XXX Temporary pardon for error unload */
	if (i915->ggtt.invalidate == gen6_ggtt_invalidate)
		return;

3481 3482 3483 3484
	/* We should only be called after i915_ggtt_enable_guc() */
	GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);

	i915->ggtt.invalidate = gen6_ggtt_invalidate;
3485 3486

	i915_ggtt_invalidate(i915);
3487 3488
}

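/*
 * Called on resume (and after reset) to rebuild the GGTT: scrub it back to
 * scratch pages, rebind every vma that carried a global binding, and
 * re-program the private PAT on gen8+.
 */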
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
3490
{
3491
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
3492
	struct i915_vma *vma, *vn;
3493

3494
	i915_check_and_clear_faults(dev_priv);
3495

3496 3497
	mutex_lock(&ggtt->vm.mutex);

3498
	/* First fill our portion of the GTT with scratch pages */
3499 3500
	ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
	ggtt->vm.closed = true; /* skip rewriting PTE on VMA unbind */
3501 3502

	/* clflush objects bound into the GGTT and rebind them. */
3503
	list_for_each_entry_safe(vma, vn, &ggtt->vm.bound_list, vm_link) {
3504
		struct drm_i915_gem_object *obj = vma->obj;
3505

3506 3507
		if (!(vma->flags & I915_VMA_GLOBAL_BIND))
			continue;
3508

3509 3510
		mutex_unlock(&ggtt->vm.mutex);

3511
		if (!i915_vma_unbind(vma))
3512
			goto lock;
3513

3514 3515 3516 3517 3518
		WARN_ON(i915_vma_bind(vma,
				      obj ? obj->cache_level : 0,
				      PIN_UPDATE));
		if (obj)
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3519 3520 3521

lock:
		mutex_lock(&ggtt->vm.mutex);
3522
	}
3523

3524
	ggtt->vm.closed = false;
3525
	i915_ggtt_invalidate(dev_priv);
3526

3527 3528
	mutex_unlock(&ggtt->vm.mutex);

3529
	if (INTEL_GEN(dev_priv) >= 8) {
3530
		struct intel_ppat *ppat = &dev_priv->ppat;
3531

3532 3533
		bitmap_set(ppat->dirty, 0, ppat->max_entries);
		dev_priv->ppat.update_hw(dev_priv);
3534 3535 3536 3537
		return;
	}
}

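/*
 * Build the scatterlist for a rotated GGTT view: pages are emitted column by
 * column so the display engine sees the framebuffer in its rotated layout,
 * while only DMA addresses (not struct pages) are populated.
 */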
static struct scatterlist *
3539
rotate_pages(struct drm_i915_gem_object *obj, unsigned int offset,
3540
	     unsigned int width, unsigned int height,
3541
	     unsigned int stride,
3542
	     struct sg_table *st, struct scatterlist *sg)
3543 3544 3545 3546 3547
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
3548
		src_idx = stride * (height - 1) + column + offset;
3549 3550 3551 3552 3553 3554
		for (row = 0; row < height; row++) {
			st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
3555
			sg_set_page(sg, NULL, I915_GTT_PAGE_SIZE, 0);
3556 3557
			sg_dma_address(sg) =
				i915_gem_object_get_dma_address(obj, src_idx);
3558
			sg_dma_len(sg) = I915_GTT_PAGE_SIZE;
3559
			sg = sg_next(sg);
3560
			src_idx -= stride;
3561 3562
		}
	}
3563 3564

	return sg;
3565 3566
}

3567 3568 3569
static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
3570
{
3571
	unsigned int size = intel_rotation_info_size(rot_info);
3572
	struct sg_table *st;
3573
	struct scatterlist *sg;
3574
	int ret = -ENOMEM;
3575
	int i;
3576 3577 3578 3579 3580 3581

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

3582
	ret = sg_alloc_table(st, size, GFP_KERNEL);
3583 3584 3585
	if (ret)
		goto err_sg_alloc;

3586 3587 3588
	st->nents = 0;
	sg = st->sgl;

3589
	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3590
		sg = rotate_pages(obj, rot_info->plane[i].offset,
3591 3592
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].stride, st, sg);
3593 3594
	}

3595 3596 3597 3598 3599 3600
	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:

3601 3602
	DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
			 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3603

3604 3605
	return ERR_PTR(ret);
}
3606

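/*
 * Build the scatterlist for a partial GGTT view: only view->partial.size
 * pages starting at view->partial.offset are mapped, coalescing runs that
 * are already contiguous in the object's backing store.
 */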
static noinline struct sg_table *
3608 3609 3610 3611
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
3612
	struct scatterlist *sg, *iter;
3613
	unsigned int count = view->partial.size;
3614
	unsigned int offset;
3615 3616 3617 3618 3619 3620
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

3621
	ret = sg_alloc_table(st, count, GFP_KERNEL);
3622 3623 3624
	if (ret)
		goto err_sg_alloc;

3625
	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
3626 3627
	GEM_BUG_ON(!iter);

3628 3629
	sg = st->sgl;
	st->nents = 0;
3630 3631
	do {
		unsigned int len;
3632

3633 3634 3635 3636 3637 3638
		len = min(iter->length - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;
3639 3640

		st->nents++;
3641 3642 3643
		count -= len >> PAGE_SHIFT;
		if (count == 0) {
			sg_mark_end(sg);
3644 3645
			i915_sg_trim(st); /* Drop any unused tail entries. */

3646 3647
			return st;
		}
3648

3649 3650 3651 3652
		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);
3653 3654 3655 3656 3657 3658 3659

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}

3660
static int
3661
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret;

	/*
	 * The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, the
	 * vma->pages must be rebuilt as well. A simple rule is that
	 * vma->pages must only be accessed while the obj->mm.pages are
	 * pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	switch (vma->ggtt_view.type) {
	default:
		GEM_BUG_ON(vma->ggtt_view.type);
		/* fall through */
	case I915_GGTT_VIEW_NORMAL:
		vma->pages = vma->obj->mm.pages;
		return 0;

	case I915_GGTT_VIEW_ROTATED:
		vma->pages =
			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
		break;

	case I915_GGTT_VIEW_PARTIAL:
		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
		break;
	}

	ret = 0;
	if (IS_ERR(vma->pages)) {
		ret = PTR_ERR(vma->pages);
		vma->pages = NULL;
		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
			  vma->ggtt_view.type, ret);
	}
	return ret;
}
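
/*
 * Illustrative sketch only (hypothetical, not part of the driver): honouring
 * the rule documented in i915_get_ggtt_vma_pages() above. vma->pages borrows
 * from obj->mm.pages, so any access to it is bracketed by pinning the
 * object's pages via the usual i915_gem_object_pin_pages()/unpin_pages()
 * helpers.
 */
static __maybe_unused int example_peek_vma_pages(struct i915_vma *vma)
{
	int err;

	err = i915_gem_object_pin_pages(vma->obj);
	if (err)
		return err;

	/* vma->pages (if the vma has been bound) is now stable to inspect. */
	if (vma->pages)
		DRM_DEBUG_DRIVER("vma has %u sg entries\n", vma->pages->nents);

	i915_gem_object_unpin_pages(vma->obj);
	return 0;
}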

/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}
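
/*
 * Illustrative sketch only (hypothetical caller, not part of the driver):
 * reserving a node at a fixed GGTT offset. The size and offset below are
 * invented for the example; real callers derive them from hardware or
 * firmware requirements. PIN_NOEVICT makes the call fail with -ENOSPC
 * rather than evict whatever currently overlaps the range, and
 * I915_COLOR_UNEVICTABLE is the color required for a node not backed by a
 * VMA.
 */
static __maybe_unused int
example_reserve_fixed(struct i915_address_space *vm, struct drm_mm_node *node)
{
	return i915_gem_gtt_reserve(vm, node,
				    16 * I915_GTT_PAGE_SIZE,	/* size */
				    256 * I915_GTT_PAGE_SIZE,	/* offset */
				    I915_COLOR_UNEVICTABLE,
				    PIN_NOEVICT);
}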

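/*
 * Pick a uniformly distributed, @align-aligned address such that the span
 * [addr, addr + len) lies entirely within [start, end). This is the cheap
 * first attempt used by i915_gem_gtt_insert() below when the GTT is full:
 * rather than scanning the whole eviction LRU, try evicting whatever sits
 * at a single random candidate slot.
 */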
static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_long();
		} else {
			addr = get_random_int();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_int();
			}
		}
		div64_u64_rem(addr, range, &addr);
		start += addr;
	}

	return round_up(start, align);
}

/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction; otherwise, the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGHEST;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	if (mode & DRM_MM_INSERT_ONCE) {
		err = drm_mm_insert_node_in_range(&vm->mm, node,
						  size, alignment, color,
						  start, end,
						  DRM_MM_INSERT_BEST);
		if (err != -ENOSPC)
			return err;
	}

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	/* No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)             (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between different GTTs, try the single
	 * random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
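
/*
 * Illustrative sketch only (hypothetical caller, not part of the driver):
 * a range-restricted insertion that prefers low addresses. The 256 MiB
 * upper bound stands in for a mappable-aperture limit and is invented for
 * the example; real callers compute the bounds from the vma's pin flags
 * and the GGTT geometry. @size must be I915_GTT_PAGE_SIZE aligned.
 */
static __maybe_unused int
example_insert_mappable(struct i915_address_space *vm,
			struct drm_mm_node *node, u64 size)
{
	return i915_gem_gtt_insert(vm, node,
				   size, 0, I915_COLOR_UNEVICTABLE,
				   0, 256ull << 20, PIN_MAPPABLE);
}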

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#include "selftests/i915_gem_gtt.c"
#endif