/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances, with a view representing all of the object's backing
 * pages in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view is
 * different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view, a
 * globally const i915_ggtt_view_normal singleton instance exists. All old core
 * GEM API functions, the ones not taking the view parameter, operate on or
 * with the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_ggtt_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_ggtt_vma_pages() function. This table is stored in the
 * vma.ggtt_view and exists for the lifetime of a VMA.
 *
 * The core API is designed to have copy semantics, which means that the
 * passed in struct i915_ggtt_view does not need to be persistent (left around
 * after calling the core API functions).
 *
 */

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/* Note that as an uncached mmio write, this should flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

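/*
 * Sanitize the user-requested PPGTT mode against platform capabilities:
 * returns 0 (disabled), 1 (aliasing ppgtt), 2 (full ppgtt) or 3 (full
 * 48-bit ppgtt).
 */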
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

	if (intel_vgpu_active(dev_priv)) {
		/* emulation is too hard */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = false;
	}

	if (!has_aliasing_ppgtt)
		return 0;

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}
#endif

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
		return has_full_48bit_ppgtt ? 3 : 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags = 0;

	vma->pages = vma->obj->mm.pages;

	/* Currently applicable only to VLV */
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
				cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm,
			     vma->node.start,
			     vma->size);
}

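/*
 * Gen8+ PTE/PDE encoding: combine the page's DMA address with the PPAT index
 * selecting the desired caching behaviour (uncached, write-through for
 * display, or write-back).
 */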
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level)
{
	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

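/*
 * Paging structures (page tables, directories, pointers) are backed by single
 * pages allocated here and DMA-mapped for GPU access.
 */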
static int __setup_page_dma(struct drm_i915_private *dev_priv,
			    struct i915_page_dma *p, gfp_t flags)
{
	struct device *kdev = &dev_priv->drm.pdev->dev;

	p->page = alloc_page(flags);
	if (!p->page)
		return -ENOMEM;

	p->daddr = dma_map_page(kdev,
				p->page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	if (dma_mapping_error(kdev, p->daddr)) {
		__free_page(p->page);
		return -EINVAL;
	}

	return 0;
}

static int setup_page_dma(struct drm_i915_private *dev_priv,
			  struct i915_page_dma *p)
{
	return __setup_page_dma(dev_priv, p, I915_GFP_DMA);
}

static void cleanup_page_dma(struct drm_i915_private *dev_priv,
			     struct i915_page_dma *p)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (WARN_ON(!p->page))
		return;

	dma_unmap_page(&pdev->dev, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(p->page);
	memset(p, 0, sizeof(*p));
}

382
static void *kmap_page_dma(struct i915_page_dma *p)
383
{
384 385
	return kmap_atomic(p->page);
}
386

387 388 389
/* We use the flushing unmap only with ppgtt structures:
 * page directories, page tables and scratch pages.
 */
390
static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
391
{
	/* There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter, so play safe for now.
	 */
395
	if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
396 397 398 399 400
		drm_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

401
#define kmap_px(px) kmap_page_dma(px_base(px))
402
#define kunmap_px(ppgtt, vaddr) \
403
		kunmap_page_dma((ppgtt)->base.i915, (vaddr))
404

405 406
#define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px))
#define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px))
407 408 409
#define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v))
#define fill32_px(dev_priv, px, v) \
		fill_page_dma_32((dev_priv), px_base(px), (v))
410

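/* Fill all 512 entries of a paging-structure page with the same 64-bit value,
 * typically a scratch entry.
 */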
static void fill_page_dma(struct drm_i915_private *dev_priv,
			  struct i915_page_dma *p, const uint64_t val)
413 414 415 416 417 418 419
{
	int i;
	uint64_t * const vaddr = kmap_page_dma(p);

	for (i = 0; i < 512; i++)
		vaddr[i] = val;

420
	kunmap_page_dma(dev_priv, vaddr);
421 422
}

423 424
static void fill_page_dma_32(struct drm_i915_private *dev_priv,
			     struct i915_page_dma *p, const uint32_t val32)
425 426 427 428 429
{
	uint64_t v = val32;

	v = v << 32 | val32;

430
	fill_page_dma(dev_priv, p, v);
431 432
}

433
static int
434
setup_scratch_page(struct drm_i915_private *dev_priv,
435 436
		   struct i915_page_dma *scratch,
		   gfp_t gfp)
437
{
438
	return __setup_page_dma(dev_priv, scratch, gfp | __GFP_ZERO);
439 440
}

441
static void cleanup_scratch_page(struct drm_i915_private *dev_priv,
442
				 struct i915_page_dma *scratch)
443
{
444
	cleanup_page_dma(dev_priv, scratch);
445 446
}

447
static struct i915_page_table *alloc_pt(struct drm_i915_private *dev_priv)
448
{
449
	struct i915_page_table *pt;
450
	const size_t count = INTEL_GEN(dev_priv) >= 8 ? GEN8_PTES : GEN6_PTES;
451
	int ret = -ENOMEM;
452 453 454 455 456

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return ERR_PTR(-ENOMEM);

457 458 459 460 461 462
	pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
				GFP_KERNEL);

	if (!pt->used_ptes)
		goto fail_bitmap;

463
	ret = setup_px(dev_priv, pt);
464
	if (ret)
465
		goto fail_page_m;
466 467

	return pt;
468

469
fail_page_m:
470 471 472 473 474
	kfree(pt->used_ptes);
fail_bitmap:
	kfree(pt);

	return ERR_PTR(ret);
475 476
}

477 478
static void free_pt(struct drm_i915_private *dev_priv,
		    struct i915_page_table *pt)
479
{
480
	cleanup_px(dev_priv, pt);
481 482 483 484 485 486 487 488 489
	kfree(pt->used_ptes);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen8_pte_t scratch_pte;

490
	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
491
				      I915_CACHE_LLC);
492

493
	fill_px(vm->i915, pt, scratch_pte);
494 495 496 497 498 499 500
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen6_pte_t scratch_pte;

501
	WARN_ON(vm->scratch_page.daddr == 0);
502

503
	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
504
				     I915_CACHE_LLC, 0);
505

506
	fill32_px(vm->i915, pt, scratch_pte);
507 508
}

509
static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv)
510
{
511
	struct i915_page_directory *pd;
512
	int ret = -ENOMEM;
513 514 515 516 517

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

518 519 520
	pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
				sizeof(*pd->used_pdes), GFP_KERNEL);
	if (!pd->used_pdes)
521
		goto fail_bitmap;
522

523
	ret = setup_px(dev_priv, pd);
524
	if (ret)
525
		goto fail_page_m;
526

527
	return pd;
528

529
fail_page_m:
530
	kfree(pd->used_pdes);
531
fail_bitmap:
532 533 534
	kfree(pd);

	return ERR_PTR(ret);
535 536
}

537 538
static void free_pd(struct drm_i915_private *dev_priv,
		    struct i915_page_directory *pd)
539 540
{
	if (px_page(pd)) {
541
		cleanup_px(dev_priv, pd);
542 543 544 545 546 547 548 549 550 551 552 553
		kfree(pd->used_pdes);
		kfree(pd);
	}
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	gen8_pde_t scratch_pde;

	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);

554
	fill_px(vm->i915, pd, scratch_pde);
555 556
}

557
static int __pdp_init(struct drm_i915_private *dev_priv,
558 559
		      struct i915_page_directory_pointer *pdp)
{
560
	size_t pdpes = I915_PDPES_PER_PDP(dev_priv);
561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587

	pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
				  sizeof(unsigned long),
				  GFP_KERNEL);
	if (!pdp->used_pdpes)
		return -ENOMEM;

	pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
				      GFP_KERNEL);
	if (!pdp->page_directory) {
		kfree(pdp->used_pdpes);
		/* the PDP might be the statically allocated top level. Keep it
		 * as clean as possible */
		pdp->used_pdpes = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->used_pdpes);
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

588
static struct
589
i915_page_directory_pointer *alloc_pdp(struct drm_i915_private *dev_priv)
590 591 592 593
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

594
	WARN_ON(!USES_FULL_48BIT_PPGTT(dev_priv));
595 596 597 598 599

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

600
	ret = __pdp_init(dev_priv, pdp);
601 602 603
	if (ret)
		goto fail_bitmap;

604
	ret = setup_px(dev_priv, pdp);
605 606 607 608 609 610 611 612 613 614 615 616 617
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

618
static void free_pdp(struct drm_i915_private *dev_priv,
619 620 621
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);
622 623
	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
		cleanup_px(dev_priv, pdp);
624 625 626 627
		kfree(pdp);
	}
}

628 629 630 631 632 633 634
static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

635
	fill_px(vm->i915, pdp, scratch_pdpe);
636 637 638 639 640 641 642 643 644 645
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	gen8_ppgtt_pml4e_t scratch_pml4e;

	scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
					  I915_CACHE_LLC);

646
	fill_px(vm->i915, pml4, scratch_pml4e);
647 648
}

649
static void
650 651 652 653
gen8_setup_pdpe(struct i915_hw_ppgtt *ppgtt,
		struct i915_page_directory_pointer *pdp,
		struct i915_page_directory *pd,
		int index)
654 655 656
{
	gen8_ppgtt_pdpe_t *page_directorypo;

657
	if (!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)))
658 659 660 661 662 663 664 665
		return;

	page_directorypo = kmap_px(pdp);
	page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_px(ppgtt, page_directorypo);
}

static void
666 667 668 669
gen8_setup_pml4e(struct i915_hw_ppgtt *ppgtt,
		 struct i915_pml4 *pml4,
		 struct i915_page_directory_pointer *pdp,
		 int index)
670 671 672
{
	gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);

673
	WARN_ON(!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)));
674 675
	pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_px(ppgtt, pagemap);
676 677
}

678
/* Broadwell Page Directory Pointer Descriptors */
679
static int gen8_write_pdp(struct drm_i915_gem_request *req,
680 681
			  unsigned entry,
			  dma_addr_t addr)
682
{
683
	struct intel_ring *ring = req->ring;
684
	struct intel_engine_cs *engine = req->engine;
685 686 687 688
	int ret;

	BUG_ON(entry >= 4);

689
	ret = intel_ring_begin(req, 6);
690 691 692
	if (ret)
		return ret;

693 694 695 696 697 698 699
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
	intel_ring_emit(ring, upper_32_bits(addr));
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
	intel_ring_emit(ring, lower_32_bits(addr));
	intel_ring_advance(ring);
700 701 702 703

	return 0;
}

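/* Legacy (32b, 3-level) ppgtt: load all four page directory addresses into
 * the ring's PDP registers.
 */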
static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
				 struct drm_i915_gem_request *req)
706
{
707
	int i, ret;
708

709
	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
710 711
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

712
		ret = gen8_write_pdp(req, i, pd_daddr);
713 714
		if (ret)
			return ret;
715
	}
B
Ben Widawsky 已提交
716

717
	return 0;
718 719
}

720 721 722 723 724 725
static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

726 727 728 729 730 731 732
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
733
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
734 735
}

736 737 738 739
/* Removes entries from a single page table, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
740 741 742
				struct i915_page_table *pt,
				uint64_t start,
				uint64_t length)
743
{
744
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
745
	unsigned int num_entries = gen8_pte_count(start, length);
M
Mika Kuoppala 已提交
746 747
	unsigned int pte = gen8_pte_index(start);
	unsigned int pte_end = pte + num_entries;
748
	gen8_pte_t *pt_vaddr;
749 750
	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
						 I915_CACHE_LLC);
751

752
	if (WARN_ON(!px_page(pt)))
753
		return false;
754

M
Mika Kuoppala 已提交
755 756 757
	GEM_BUG_ON(pte_end > GEN8_PTES);

	bitmap_clear(pt->used_ptes, pte, num_entries);
758

759
	if (bitmap_empty(pt->used_ptes, GEN8_PTES))
760 761
		return true;

762 763
	pt_vaddr = kmap_px(pt);

M
Mika Kuoppala 已提交
764 765
	while (pte < pte_end)
		pt_vaddr[pte++] = scratch_pte;
766

767
	kunmap_px(ppgtt, pt_vaddr);
768 769

	return false;
770
}
771

772 773 774 775
/* Removes entries from a single page dir, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries
 */
static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
776 777 778 779
				struct i915_page_directory *pd,
				uint64_t start,
				uint64_t length)
{
780
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
781 782
	struct i915_page_table *pt;
	uint64_t pde;
783 784 785
	gen8_pde_t *pde_vaddr;
	gen8_pde_t scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt),
						 I915_CACHE_LLC);
786 787

	gen8_for_each_pde(pt, pd, start, length, pde) {
788
		if (WARN_ON(!pd->page_table[pde]))
789
			break;
790

791 792 793 794 795
		if (gen8_ppgtt_clear_pt(vm, pt, start, length)) {
			__clear_bit(pde, pd->used_pdes);
			pde_vaddr = kmap_px(pd);
			pde_vaddr[pde] = scratch_pde;
			kunmap_px(ppgtt, pde_vaddr);
796
			free_pt(vm->i915, pt);
797 798 799
		}
	}

800
	if (bitmap_empty(pd->used_pdes, I915_PDES))
801 802 803
		return true;

	return false;
804
}
805

806 807 808 809
/* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries
 */
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
810 811 812 813
				 struct i915_page_directory_pointer *pdp,
				 uint64_t start,
				 uint64_t length)
{
814
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
815 816
	struct i915_page_directory *pd;
	uint64_t pdpe;
817

818 819 820
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (WARN_ON(!pdp->page_directory[pdpe]))
			break;
821

822 823
		if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
			__clear_bit(pdpe, pdp->used_pdpes);
824
			gen8_setup_pdpe(ppgtt, pdp, vm->scratch_pd, pdpe);
825
			free_pd(vm->i915, pd);
826 827 828
		}
	}

	mark_tlbs_dirty(ppgtt);

	if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(vm->i915)))
		return true;

	return false;
835
}
836

837 838 839 840
/* Removes entries from a single pml4.
 * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries are always scratch pml4e.
 */
841 842 843 844 845
static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
				  struct i915_pml4 *pml4,
				  uint64_t start,
				  uint64_t length)
{
846
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
847 848
	struct i915_page_directory_pointer *pdp;
	uint64_t pml4e;
849

850
	GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->i915));
851

852 853 854
	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (WARN_ON(!pml4->pdps[pml4e]))
			break;
855

856 857
		if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
			__clear_bit(pml4e, pml4->used_pml4es);
858
			gen8_setup_pml4e(ppgtt, pml4, vm->scratch_pdp, pml4e);
859
			free_pdp(vm->i915, pdp);
860
		}
861 862 863
	}
}

864
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
865
				   uint64_t start, uint64_t length)
866
{
867
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
868

869
	if (USES_FULL_48BIT_PPGTT(vm->i915))
870 871 872
		gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
	else
		gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
873 874 875 876 877
}

static void
gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
			      struct i915_page_directory_pointer *pdp,
878
			      struct sg_page_iter *sg_iter,
879 880 881
			      uint64_t start,
			      enum i915_cache_level cache_level)
{
882
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
883
	gen8_pte_t *pt_vaddr;
884 885 886
	unsigned pdpe = gen8_pdpe_index(start);
	unsigned pde = gen8_pde_index(start);
	unsigned pte = gen8_pte_index(start);
887

888
	pt_vaddr = NULL;
889

890
	while (__sg_page_iter_next(sg_iter)) {
B
Ben Widawsky 已提交
891
		if (pt_vaddr == NULL) {
892
			struct i915_page_directory *pd = pdp->page_directory[pdpe];
893
			struct i915_page_table *pt = pd->page_table[pde];
894
			pt_vaddr = kmap_px(pt);
B
Ben Widawsky 已提交
895
		}
896

897
		pt_vaddr[pte] =
898
			gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
899
					cache_level);
900
		if (++pte == GEN8_PTES) {
901
			kunmap_px(ppgtt, pt_vaddr);
902
			pt_vaddr = NULL;
903
			if (++pde == I915_PDES) {
904
				if (++pdpe == I915_PDPES_PER_PDP(vm->i915))
905
					break;
906 907 908
				pde = 0;
			}
			pte = 0;
909 910
		}
	}
911 912 913

	if (pt_vaddr)
		kunmap_px(ppgtt, pt_vaddr);
914 915
}

916 917 918 919 920 921
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level,
				      u32 unused)
{
922
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
923
	struct sg_page_iter sg_iter;
924

925
	__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
926

927
	if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
928 929 930 931
		gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
					      cache_level);
	} else {
		struct i915_page_directory_pointer *pdp;
932
		uint64_t pml4e;
933 934
		uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;

935
		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
936 937 938 939
			gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
						      start, cache_level);
		}
	}
940 941
}

942
static void gen8_free_page_tables(struct drm_i915_private *dev_priv,
943
				  struct i915_page_directory *pd)
944 945 946
{
	int i;

947
	if (!px_page(pd))
948 949
		return;

950
	for_each_set_bit(i, pd->used_pdes, I915_PDES) {
951 952
		if (WARN_ON(!pd->page_table[i]))
			continue;
953

954
		free_pt(dev_priv, pd->page_table[i]);
955 956
		pd->page_table[i] = NULL;
	}
B
Ben Widawsky 已提交
957 958
}

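/* Allocate and initialize the scratch page and the scratch paging structures
 * (pt, pd, and pdp for 48-bit ppgtt) that back unused parts of the address
 * space.
 */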
static int gen8_init_scratch(struct i915_address_space *vm)
{
961
	struct drm_i915_private *dev_priv = vm->i915;
962
	int ret;
963

964
	ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
965 966
	if (ret)
		return ret;
967

968
	vm->scratch_pt = alloc_pt(dev_priv);
969
	if (IS_ERR(vm->scratch_pt)) {
970 971
		ret = PTR_ERR(vm->scratch_pt);
		goto free_scratch_page;
972 973
	}

974
	vm->scratch_pd = alloc_pd(dev_priv);
975
	if (IS_ERR(vm->scratch_pd)) {
976 977
		ret = PTR_ERR(vm->scratch_pd);
		goto free_pt;
978 979
	}

980 981
	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
		vm->scratch_pdp = alloc_pdp(dev_priv);
982
		if (IS_ERR(vm->scratch_pdp)) {
983 984
			ret = PTR_ERR(vm->scratch_pdp);
			goto free_pd;
985 986 987
		}
	}

988 989
	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
990
	if (USES_FULL_48BIT_PPGTT(dev_priv))
991
		gen8_initialize_pdp(vm, vm->scratch_pdp);
992 993

	return 0;
994 995

free_pd:
996
	free_pd(dev_priv, vm->scratch_pd);
997
free_pt:
998
	free_pt(dev_priv, vm->scratch_pt);
999
free_scratch_page:
1000
	cleanup_scratch_page(dev_priv, &vm->scratch_page);
1001 1002

	return ret;
1003 1004
}

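/* When running under a vGPU, notify the host about PPGTT page table
 * creation/destruction via the vgtif registers.
 */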
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	enum vgt_g2v_type msg;
1008
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
1009 1010
	int i;

1011
	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
1012 1013
		u64 daddr = px_dma(&ppgtt->pml4);

1014 1015
		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
1016 1017 1018 1019 1020 1021 1022

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
			u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

1023 1024
			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

1036 1037
static void gen8_free_scratch(struct i915_address_space *vm)
{
1038
	struct drm_i915_private *dev_priv = vm->i915;
1039

1040 1041 1042 1043 1044
	if (USES_FULL_48BIT_PPGTT(dev_priv))
		free_pdp(dev_priv, vm->scratch_pdp);
	free_pd(dev_priv, vm->scratch_pd);
	free_pt(dev_priv, vm->scratch_pt);
	cleanup_scratch_page(dev_priv, &vm->scratch_page);
1045 1046
}

1047
static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv,
1048
				    struct i915_page_directory_pointer *pdp)
1049 1050 1051
{
	int i;

1052
	for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)) {
1053
		if (WARN_ON(!pdp->page_directory[i]))
1054 1055
			continue;

1056 1057
		gen8_free_page_tables(dev_priv, pdp->page_directory[i]);
		free_pd(dev_priv, pdp->page_directory[i]);
1058
	}
1059

1060
	free_pdp(dev_priv, pdp);
1061 1062 1063 1064
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
1065
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
1066 1067 1068 1069 1070 1071
	int i;

	for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
		if (WARN_ON(!ppgtt->pml4.pdps[i]))
			continue;

1072
		gen8_ppgtt_cleanup_3lvl(dev_priv, ppgtt->pml4.pdps[i]);
1073 1074
	}

1075
	cleanup_px(dev_priv, &ppgtt->pml4);
1076 1077 1078 1079
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
1080
	struct drm_i915_private *dev_priv = vm->i915;
1081
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1082

1083
	if (intel_vgpu_active(dev_priv))
1084 1085
		gen8_ppgtt_notify_vgt(ppgtt, false);

1086 1087
	if (!USES_FULL_48BIT_PPGTT(dev_priv))
		gen8_ppgtt_cleanup_3lvl(dev_priv, &ppgtt->pdp);
1088 1089
	else
		gen8_ppgtt_cleanup_4lvl(ppgtt);
1090

1091
	gen8_free_scratch(vm);
1092 1093
}

1094 1095
/**
 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
 * @vm:	Master vm structure.
 * @pd:	Page directory for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pts:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page tables. Extremely similar to
 * gen8_ppgtt_alloc_page_directories(). The main difference is here we are limited by
 * the page directory boundary (instead of the page directory pointer). That
 * boundary is 1GB virtual. Therefore, unlike gen8_ppgtt_alloc_page_directories(), it is
 * possible, and likely that the caller will need to use multiple calls of this
 * function to achieve the appropriate allocation.
 *
 * Return: 0 if success; negative error code otherwise.
 */
1112
static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
1113
				     struct i915_page_directory *pd,
1114
				     uint64_t start,
1115 1116
				     uint64_t length,
				     unsigned long *new_pts)
1117
{
1118
	struct drm_i915_private *dev_priv = vm->i915;
1119
	struct i915_page_table *pt;
1120
	uint32_t pde;
1121

1122
	gen8_for_each_pde(pt, pd, start, length, pde) {
1123
		/* Don't reallocate page tables */
1124
		if (test_bit(pde, pd->used_pdes)) {
1125
			/* Scratch is never allocated this way */
1126
			WARN_ON(pt == vm->scratch_pt);
1127 1128 1129
			continue;
		}

1130
		pt = alloc_pt(dev_priv);
1131
		if (IS_ERR(pt))
1132 1133
			goto unwind_out;

1134
		gen8_initialize_pt(vm, pt);
1135
		pd->page_table[pde] = pt;
1136
		__set_bit(pde, new_pts);
1137
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
1138 1139
	}

1140
	return 0;
1141 1142

unwind_out:
1143
	for_each_set_bit(pde, new_pts, I915_PDES)
1144
		free_pt(dev_priv, pd->page_table[pde]);
1145

B
Ben Widawsky 已提交
1146
	return -ENOMEM;
1147 1148
}

1149 1150
/**
 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
 * @vm:	Master vm structure.
 * @pdp:	Page directory pointer for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pds:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page directories starting at the pde index of
 * @start, and ending at the pde index @start + @length. This function will skip
 * over already allocated page directories within the range, and only allocate
 * new ones, setting the appropriate pointer within the pdp as well as the
 * correct position in the bitmap @new_pds.
 *
 * The function will only allocate the pages within the range for a given page
 * directory pointer. In other words, if @start + @length straddles a virtually
 * addressed PDP boundary (512GB for 4k pages), there will be more allocations
 * required by the caller. This is not currently possible, and the BUG in the
 * code will prevent it.
 *
 * Return: 0 if success; negative error code otherwise.
 */
1172 1173 1174 1175 1176 1177
static int
gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
				  struct i915_page_directory_pointer *pdp,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pds)
1178
{
1179
	struct drm_i915_private *dev_priv = vm->i915;
1180
	struct i915_page_directory *pd;
1181
	uint32_t pdpe;
1182
	uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
1183

1184
	WARN_ON(!bitmap_empty(new_pds, pdpes));
1185

1186
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1187
		if (test_bit(pdpe, pdp->used_pdpes))
1188
			continue;
1189

1190
		pd = alloc_pd(dev_priv);
1191
		if (IS_ERR(pd))
B
Ben Widawsky 已提交
1192
			goto unwind_out;
1193

1194
		gen8_initialize_pd(vm, pd);
1195
		pdp->page_directory[pdpe] = pd;
1196
		__set_bit(pdpe, new_pds);
1197
		trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
B
Ben Widawsky 已提交
1198 1199
	}

1200
	return 0;
B
Ben Widawsky 已提交
1201 1202

unwind_out:
1203
	for_each_set_bit(pdpe, new_pds, pdpes)
1204
		free_pd(dev_priv, pdp->page_directory[pdpe]);
B
Ben Widawsky 已提交
1205 1206

	return -ENOMEM;
1207 1208
}

/**
 * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
 * @vm:	Master vm structure.
 * @pml4:	Page map level 4 for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pdps:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page directory pointers. Extremely similar to
 * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
 * The main difference is here we are limited by the pml4 boundary (instead of
 * the page directory pointer).
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int
gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
				  struct i915_pml4 *pml4,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pdps)
{
1232
	struct drm_i915_private *dev_priv = vm->i915;
1233 1234 1235 1236 1237
	struct i915_page_directory_pointer *pdp;
	uint32_t pml4e;

	WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));

1238
	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1239
		if (!test_bit(pml4e, pml4->used_pml4es)) {
1240
			pdp = alloc_pdp(dev_priv);
1241 1242 1243
			if (IS_ERR(pdp))
				goto unwind_out;

1244
			gen8_initialize_pdp(vm, pdp);
1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257
			pml4->pdps[pml4e] = pdp;
			__set_bit(pml4e, new_pdps);
			trace_i915_page_directory_pointer_entry_alloc(vm,
								      pml4e,
								      start,
								      GEN8_PML4E_SHIFT);
		}
	}

	return 0;

unwind_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1258
		free_pdp(dev_priv, pml4->pdps[pml4e]);
1259 1260 1261 1262

	return -ENOMEM;
}

1263
static void
1264
free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
1265 1266 1267 1268 1269 1270 1271 1272 1273 1274
{
	kfree(new_pts);
	kfree(new_pds);
}

/* Fills in the page directory bitmap, and the array of page tables bitmap. Both
 * of these are based on the number of PDPEs in the system.
 */
static
int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
1275
					 unsigned long **new_pts,
1276
					 uint32_t pdpes)
1277 1278
{
	unsigned long *pds;
1279
	unsigned long *pts;
1280

1281
	pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
1282 1283 1284
	if (!pds)
		return -ENOMEM;

1285 1286 1287 1288
	pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
		      GFP_TEMPORARY);
	if (!pts)
		goto err_out;
1289 1290 1291 1292 1293 1294 1295

	*new_pds = pds;
	*new_pts = pts;

	return 0;

err_out:
1296
	free_gen8_temp_bitmaps(pds, pts);
1297 1298 1299
	return -ENOMEM;
}

1300 1301 1302 1303
static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp,
				    uint64_t start,
				    uint64_t length)
1304
{
1305
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1306
	unsigned long *new_page_dirs, *new_page_tables;
1307
	struct drm_i915_private *dev_priv = vm->i915;
1308
	struct i915_page_directory *pd;
1309 1310
	const uint64_t orig_start = start;
	const uint64_t orig_length = length;
1311
	uint32_t pdpe;
1312
	uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
1313 1314
	int ret;

1315
	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
1316 1317 1318
	if (ret)
		return ret;

1319
	/* Do the allocations first so we can easily bail out */
1320 1321
	ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
						new_page_dirs);
1322
	if (ret) {
1323
		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1324 1325 1326 1327
		return ret;
	}

	/* For every page directory referenced, allocate page tables */
1328
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1329
		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
1330
						new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
1331 1332 1333 1334
		if (ret)
			goto err_out;
	}

1335 1336 1337
	start = orig_start;
	length = orig_length;

1338 1339
	/* Allocations have completed successfully, so set the bitmaps, and do
	 * the mappings. */
1340
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1341
		gen8_pde_t *const page_directory = kmap_px(pd);
1342
		struct i915_page_table *pt;
1343
		uint64_t pd_len = length;
1344 1345 1346
		uint64_t pd_start = start;
		uint32_t pde;

1347 1348 1349
		/* Every pd should be allocated, we just did that above. */
		WARN_ON(!pd);

1350
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361
			/* Same reasoning as pd */
			WARN_ON(!pt);
			WARN_ON(!pd_len);
			WARN_ON(!gen8_pte_count(pd_start, pd_len));

			/* Set our used ptes within the page table */
			bitmap_set(pt->used_ptes,
				   gen8_pte_index(pd_start),
				   gen8_pte_count(pd_start, pd_len));

			/* Our pde is now pointing to the pagetable, pt */
1362
			__set_bit(pde, pd->used_pdes);
1363 1364

			/* Map the PDE to the page table */
1365 1366
			page_directory[pde] = gen8_pde_encode(px_dma(pt),
							      I915_CACHE_LLC);
1367 1368 1369 1370
			trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
							gen8_pte_index(start),
							gen8_pte_count(start, length),
							GEN8_PTES);
1371 1372 1373

			/* NB: We haven't yet mapped ptes to pages. At this
			 * point we're still relying on insert_entries() */
1374
		}
1375

1376
		kunmap_px(ppgtt, page_directory);
1377
		__set_bit(pdpe, pdp->used_pdpes);
1378
		gen8_setup_pdpe(ppgtt, pdp, pd, pdpe);
1379 1380
	}

1381
	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1382
	mark_tlbs_dirty(ppgtt);
B
Ben Widawsky 已提交
1383
	return 0;
1384

B
Ben Widawsky 已提交
1385
err_out:
1386
	while (pdpe--) {
1387 1388
		unsigned long temp;

1389 1390
		for_each_set_bit(temp, new_page_tables + pdpe *
				BITS_TO_LONGS(I915_PDES), I915_PDES)
1391 1392
			free_pt(dev_priv,
				pdp->page_directory[pdpe]->page_table[temp]);
1393 1394
	}

1395
	for_each_set_bit(pdpe, new_page_dirs, pdpes)
1396
		free_pd(dev_priv, pdp->page_directory[pdpe]);
1397

1398
	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1399
	mark_tlbs_dirty(ppgtt);
1400 1401 1402
	return ret;
}

1403 1404 1405 1406 1407 1408
static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
				    struct i915_pml4 *pml4,
				    uint64_t start,
				    uint64_t length)
{
	DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
1409
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1410
	struct i915_page_directory_pointer *pdp;
1411
	uint64_t pml4e;
1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425
	int ret = 0;

	/* Do the pml4 allocations first, so we don't need to track the newly
	 * allocated tables below the pdp */
	bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);

	/* The pagedirectory and pagetable allocations are done in the shared 3
	 * and 4 level code. Just allocate the pdps.
	 */
	ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
						new_pdps);
	if (ret)
		return ret;

1426
	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1427 1428 1429 1430 1431 1432
		WARN_ON(!pdp);

		ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
		if (ret)
			goto err_out;

1433
		gen8_setup_pml4e(ppgtt, pml4, pdp, pml4e);
1434 1435 1436 1437 1438 1439 1440 1441 1442
	}

	bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
		  GEN8_PML4ES_PER_PML4);

	return 0;

err_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1443
		gen8_ppgtt_cleanup_3lvl(vm->i915, pml4->pdps[pml4e]);
1444 1445 1446 1447 1448 1449 1450

	return ret;
}

static int gen8_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start, uint64_t length)
{
1451
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1452

1453
	if (USES_FULL_48BIT_PPGTT(vm->i915))
1454 1455 1456 1457 1458
		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
	else
		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
}

1459 1460 1461 1462 1463 1464 1465 1466
static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
			  uint64_t start, uint64_t length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_page_directory *pd;
	uint32_t pdpe;

1467
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1468 1469 1470 1471 1472 1473 1474 1475 1476
		struct i915_page_table *pt;
		uint64_t pd_len = length;
		uint64_t pd_start = start;
		uint32_t pde;

		if (!test_bit(pdpe, pdp->used_pdpes))
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
1477
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520
			uint32_t  pte;
			gen8_pte_t *pt_vaddr;

			if (!test_bit(pde, pd->used_pdes))
				continue;

			pt_vaddr = kmap_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				uint64_t va =
					(pdpe << GEN8_PDPE_SHIFT) |
					(pde << GEN8_PDE_SHIFT) |
					(pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, "  SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			/* don't use kunmap_px, it could trigger
			 * an unnecessary flush.
			 */
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	uint64_t start = ppgtt->base.start;
	uint64_t length = ppgtt->base.total;
1521
	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
1522
						 I915_CACHE_LLC);
1523

1524
	if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
1525 1526
		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
	} else {
1527
		uint64_t pml4e;
1528 1529 1530
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

1531
		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1532 1533 1534 1535 1536 1537 1538 1539 1540
			if (!test_bit(pml4e, pml4->used_pml4es))
				continue;

			seq_printf(m, "    PML4E #%llu\n", pml4e);
			gen8_dump_pdp(pdp, start, length, scratch_pte, m);
		}
	}
}

1541 1542
static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
1543
	unsigned long *new_page_dirs, *new_page_tables;
1544
	uint32_t pdpes = I915_PDPES_PER_PDP(to_i915(ppgtt->base.dev));
1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562
	int ret;

	/* We allocate a temp bitmap for page tables for no gain
	 * but as this is for init only, let's keep things simple
	 */
	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Allocate for all pdps regardless of how the ppgtt
	 * was defined.
	 */
	ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
						0, 1ULL << 32,
						new_page_dirs);
	if (!ret)
		*ppgtt->pdp.used_pdpes = *new_page_dirs;

1563
	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1564 1565 1566 1567

	return ret;
}

1568
/*
 * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP
 * registers, with a net effect resembling a 2-level page table in normal x86
 * terms. Each PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of
 * legacy 32b address space.
 *
 */
1575
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
B
Ben Widawsky 已提交
1576
{
1577
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
1578
	int ret;
1579

1580 1581 1582
	ret = gen8_init_scratch(&ppgtt->base);
	if (ret)
		return ret;
1583

1584 1585
	ppgtt->base.start = 0;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1586
	ppgtt->base.allocate_va_range = gen8_alloc_va_range;
1587
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
1588
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
1589 1590
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
1591
	ppgtt->debug_dump = gen8_dump_ppgtt;
1592

1593 1594
	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
		ret = setup_px(dev_priv, &ppgtt->pml4);
1595 1596
		if (ret)
			goto free_scratch;
1597

1598 1599
		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);

1600
		ppgtt->base.total = 1ULL << 48;
1601
		ppgtt->switch_mm = gen8_48b_mm_switch;
1602
	} else {
1603
		ret = __pdp_init(dev_priv, &ppgtt->pdp);
1604 1605 1606 1607
		if (ret)
			goto free_scratch;

		ppgtt->base.total = 1ULL << 32;
1608
		ppgtt->switch_mm = gen8_legacy_mm_switch;
1609 1610 1611
		trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
							      0, 0,
							      GEN8_PML4E_SHIFT);
1612

1613
		if (intel_vgpu_active(dev_priv)) {
1614 1615 1616 1617
			ret = gen8_preallocate_top_level_pdps(ppgtt);
			if (ret)
				goto free_scratch;
		}
1618
	}
1619

1620
	if (intel_vgpu_active(dev_priv))
1621 1622
		gen8_ppgtt_notify_vgt(ppgtt, true);

1623
	return 0;
1624 1625 1626 1627

free_scratch:
	gen8_free_scratch(&ppgtt->base);
	return ret;
1628 1629
}

B
Ben Widawsky 已提交
1630 1631 1632
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
1633
	struct i915_page_table *unused;
1634
	gen6_pte_t scratch_pte;
B
Ben Widawsky 已提交
1635
	uint32_t pd_entry;
1636
	uint32_t  pte, pde;
1637
	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
B
Ben Widawsky 已提交
1638

1639
	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
1640
				     I915_CACHE_LLC, 0);
B
Ben Widawsky 已提交
1641

1642
	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
B
Ben Widawsky 已提交
1643
		u32 expected;
1644
		gen6_pte_t *pt_vaddr;
1645
		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
1646
		pd_entry = readl(ppgtt->pd_addr + pde);
B
Ben Widawsky 已提交
1647 1648 1649 1650 1651 1652 1653 1654 1655
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

1656 1657
		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);

1658
		for (pte = 0; pte < GEN6_PTES; pte+=4) {
B
Ben Widawsky 已提交
1659
			unsigned long va =
1660
				(pde * PAGE_SIZE * GEN6_PTES) +
B
Ben Widawsky 已提交
1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, "  SCRATCH ");
			}
			seq_puts(m, "\n");
		}
1679
		kunmap_px(ppgtt, pt_vaddr);
B
Ben Widawsky 已提交
1680 1681 1682
	}
}

1683
/* Write pde (index) from the page directory @pd to the page table @pt */
1684 1685
static void gen6_write_pde(struct i915_page_directory *pd,
			    const int pde, struct i915_page_table *pt)
B
Ben Widawsky 已提交
1686
{
1687 1688 1689 1690
	/* Caller needs to make sure the write completes if necessary */
	struct i915_hw_ppgtt *ppgtt =
		container_of(pd, struct i915_hw_ppgtt, pd);
	u32 pd_entry;
B
Ben Widawsky 已提交
1691

1692
	pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
1693
	pd_entry |= GEN6_PDE_VALID;
B
Ben Widawsky 已提交
1694

1695 1696
	writel(pd_entry, ppgtt->pd_addr + pde);
}
B
Ben Widawsky 已提交
1697

1698 1699 1700
/* Write all the page tables found in the ppgtt structure to incrementing page
 * directories. */
static void gen6_write_page_range(struct drm_i915_private *dev_priv,
1701
				  struct i915_page_directory *pd,
1702 1703
				  uint32_t start, uint32_t length)
{
1704
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
1705
	struct i915_page_table *pt;
1706
	uint32_t pde;
1707

1708
	gen6_for_each_pde(pt, pd, start, length, pde)
1709 1710 1711 1712
		gen6_write_pde(pd, pde, pt);

	/* Make sure the write is complete before other code can use this page
	 * table. Also required for WC mapped PTEs */
1713
	readl(ggtt->gsm);
B
Ben Widawsky 已提交
1714 1715
}

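/* Encode the page directory's GGTT offset in the format expected by
 * RING_PP_DIR_BASE.
 */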
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
B
Ben Widawsky 已提交
1717
{
1718
	BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
1719

1720
	return (ppgtt->pd.base.ggtt_offset / 64) << 16;
1721 1722
}

1723
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
1724
			 struct drm_i915_gem_request *req)
1725
{
1726
	struct intel_ring *ring = req->ring;
1727
	struct intel_engine_cs *engine = req->engine;
1728 1729 1730
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
1731
	ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
1732 1733 1734
	if (ret)
		return ret;

1735
	ret = intel_ring_begin(req, 6);
1736 1737 1738
	if (ret)
		return ret;

1739 1740 1741 1742 1743 1744 1745
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
1746 1747 1748 1749

	return 0;
}

1750
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
1751
			  struct drm_i915_gem_request *req)
1752
{
1753
	struct intel_ring *ring = req->ring;
1754
	struct intel_engine_cs *engine = req->engine;
1755 1756 1757
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
1758
	ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
1759 1760 1761
	if (ret)
		return ret;

1762
	ret = intel_ring_begin(req, 6);
1763 1764 1765
	if (ret)
		return ret;

1766 1767 1768 1769 1770 1771 1772
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
1773

1774
	/* XXX: RCS is the only one to auto invalidate the TLBs? */
1775
	if (engine->id != RCS) {
1776
		ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
1777 1778 1779 1780
		if (ret)
			return ret;
	}

1781 1782 1783
	return 0;
}

1784
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
1785
			  struct drm_i915_gem_request *req)
1786
{
1787
	struct intel_engine_cs *engine = req->engine;
1788
	struct drm_i915_private *dev_priv = req->i915;
1789

1790 1791
	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
1792 1793 1794
	return 0;
}

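/* Enable PPGTT in each ring's GFX_MODE register, selecting 48b (4-level)
 * addressing when a full 48-bit ppgtt is in use.
 */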
static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
1796
{
1797
	struct intel_engine_cs *engine;
1798
	enum intel_engine_id id;
B
Ben Widawsky 已提交
1799

1800
	for_each_engine(engine, dev_priv, id) {
1801 1802
		u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
				 GEN8_GFX_PPGTT_48B : 0;
1803
		I915_WRITE(RING_MODE_GEN7(engine),
1804
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
1805 1806
	}
}
B
Ben Widawsky 已提交
1807

1808
static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
B
Ben Widawsky 已提交
1809
{
1810
	struct intel_engine_cs *engine;
1811
	uint32_t ecochk, ecobits;
1812
	enum intel_engine_id id;
B
Ben Widawsky 已提交
1813

1814 1815
	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1816

1817
	ecochk = I915_READ(GAM_ECOCHK);
1818
	if (IS_HASWELL(dev_priv)) {
1819 1820 1821 1822 1823 1824
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);
1825

1826
	for_each_engine(engine, dev_priv, id) {
B
Ben Widawsky 已提交
1827
		/* GFX_MODE is per-ring on gen7+ */
1828
		I915_WRITE(RING_MODE_GEN7(engine),
1829
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
B
Ben Widawsky 已提交
1830
	}
1831
}
B
Ben Widawsky 已提交
1832

1833
static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
1834 1835
{
	uint32_t ecochk, gab_ctl, ecobits;
1836

1837 1838 1839
	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);
B
Ben Widawsky 已提交
1840

1841 1842 1843 1844 1845 1846 1847
	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
B
Ben Widawsky 已提交
1848 1849
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	gen6_pte_t *pt_vaddr, scratch_pte;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned first_pte = first_entry % GEN6_PTES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > GEN6_PTES)
			last_pte = GEN6_PTES;

		pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_px(ppgtt, pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}
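
/*
 * Worked example (assuming GEN6_PTES == 1024, i.e. a 4KiB page table of
 * 4-byte PTEs): clearing start = 6MiB, length = 4MiB gives first_entry =
 * 1536 and num_entries = 1024, so the loop scrubs PTEs 512..1023 of page
 * table 1 and PTEs 0..511 of page table 2 with the scratch PTE.
 */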

static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level, u32 flags)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned act_pte = first_entry % GEN6_PTES;
	gen6_pte_t *pt_vaddr = NULL;
	struct sgt_iter sgt_iter;
	dma_addr_t addr;

	for_each_sgt_dma(addr, sgt_iter, pages) {
		if (pt_vaddr == NULL)
			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);

		pt_vaddr[act_pte] =
			vm->pte_encode(addr, cache_level, flags);

		if (++act_pte == GEN6_PTES) {
			kunmap_px(ppgtt, pt_vaddr);
			pt_vaddr = NULL;
			act_pt++;
			act_pte = 0;
		}
	}

	if (pt_vaddr)
		kunmap_px(ppgtt, pt_vaddr);
}

static int gen6_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start_in, uint64_t length_in)
{
	DECLARE_BITMAP(new_page_tables, I915_PDES);
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_table *pt;
	uint32_t start, length, start_save, length_save;
	uint32_t pde;
	int ret;

	start = start_save = start_in;
	length = length_save = length_in;

	bitmap_zero(new_page_tables, I915_PDES);

	/* The allocation is done in two stages so that we can bail out with
	 * minimal amount of pain. The first stage finds new page tables that
	 * need allocation. The second stage marks the PTEs that are in use
	 * within those page tables.
	 */
	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
		if (pt != vm->scratch_pt) {
			WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
			continue;
		}

		/* We've already allocated a page table */
		WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));

		pt = alloc_pt(dev_priv);
		if (IS_ERR(pt)) {
			ret = PTR_ERR(pt);
			goto unwind_out;
		}

		gen6_initialize_pt(vm, pt);

		ppgtt->pd.page_table[pde] = pt;
		__set_bit(pde, new_page_tables);
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
	}

	start = start_save;
	length = length_save;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
		DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);

		bitmap_zero(tmp_bitmap, GEN6_PTES);
		bitmap_set(tmp_bitmap, gen6_pte_index(start),
			   gen6_pte_count(start, length));

		if (__test_and_clear_bit(pde, new_page_tables))
			gen6_write_pde(&ppgtt->pd, pde, pt);

		trace_i915_page_table_entry_map(vm, pde, pt,
					 gen6_pte_index(start),
					 gen6_pte_count(start, length),
					 GEN6_PTES);
		bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
				GEN6_PTES);
	}

	WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));

	/* Make sure write is complete before other code can use this page
	 * table. Also required for WC mapped PTEs. */
	readl(ggtt->gsm);

	mark_tlbs_dirty(ppgtt);
	return 0;

unwind_out:
	for_each_set_bit(pde, new_page_tables, I915_PDES) {
		struct i915_page_table *pt = ppgtt->pd.page_table[pde];

		ppgtt->pd.page_table[pde] = vm->scratch_pt;
		free_pt(dev_priv, pt);
	}

	mark_tlbs_dirty(ppgtt);
	return ret;
}

static int gen6_init_scratch(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	int ret;

	ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(dev_priv);
	if (IS_ERR(vm->scratch_pt)) {
		cleanup_scratch_page(dev_priv, &vm->scratch_page);
		return PTR_ERR(vm->scratch_pt);
	}

	gen6_initialize_pt(vm, vm->scratch_pt);

	return 0;
}

static void gen6_free_scratch(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;

	free_pt(dev_priv, vm->scratch_pt);
	cleanup_scratch_page(dev_priv, &vm->scratch_page);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory *pd = &ppgtt->pd;
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_page_table *pt;
	uint32_t pde;

	drm_mm_remove_node(&ppgtt->node);

	gen6_for_all_pdes(pt, pd, pde)
		if (pt != vm->scratch_pt)
			free_pt(dev_priv, pt);

	gen6_free_scratch(vm);
}

static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	BUG_ON(!drm_mm_initialized(&ggtt->base.mm));

	ret = gen6_init_scratch(vm);
	if (ret)
		return ret;

	ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
				  GEN6_PD_SIZE, GEN6_PD_ALIGN,
				  I915_COLOR_UNEVICTABLE,
				  0, ggtt->base.total,
				  PIN_HIGH);
	if (ret)
		goto err_out;

	if (ppgtt->node.start < ggtt->mappable_end)
		DRM_DEBUG("Forced to use aperture for PDEs\n");

	return 0;

err_out:
	gen6_free_scratch(vm);
	return ret;
}

static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
	return gen6_ppgtt_allocate_page_directories(ppgtt);
}

static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
				  uint64_t start, uint64_t length)
{
	struct i915_page_table *unused;
	uint32_t pde;

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	ppgtt->base.pte_encode = ggtt->base.pte_encode;
	if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
		ppgtt->switch_mm = gen6_mm_switch;
	else if (IS_HASWELL(dev_priv))
		ppgtt->switch_mm = hsw_mm_switch;
	else if (IS_GEN7(dev_priv))
		ppgtt->switch_mm = gen7_mm_switch;
	else
		BUG();

	ret = gen6_ppgtt_alloc(ppgtt);
	if (ret)
		return ret;

	ppgtt->base.allocate_va_range = gen6_alloc_va_range;
	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
	ppgtt->debug_dump = gen6_dump_ppgtt;

	ppgtt->pd.base.ggtt_offset =
		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);

	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);

	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);

	gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);

	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
			 ppgtt->node.size >> 20,
			 ppgtt->node.start / PAGE_SIZE);

	DRM_DEBUG("Adding PPGTT at offset %x\n",
		  ppgtt->pd.base.ggtt_offset << 10);

	return 0;
}

static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
			   struct drm_i915_private *dev_priv)
{
	ppgtt->base.i915 = dev_priv;

	if (INTEL_INFO(dev_priv)->gen < 8)
		return gen6_ppgtt_init(ppgtt);
	else
		return gen8_ppgtt_init(ppgtt);
}

static void i915_address_space_init(struct i915_address_space *vm,
				    struct drm_i915_private *dev_priv,
				    const char *name)
{
	i915_gem_timeline_init(dev_priv, &vm->timeline, name);

	drm_mm_init(&vm->mm, vm->start, vm->total);
	vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;

	INIT_LIST_HEAD(&vm->active_list);
	INIT_LIST_HEAD(&vm->inactive_list);
	INIT_LIST_HEAD(&vm->unbound_list);

	list_add_tail(&vm->global_link, &dev_priv->vm_list);
}

static void i915_address_space_fini(struct i915_address_space *vm)
{
	i915_gem_timeline_fini(&vm->timeline);
	drm_mm_takedown(&vm->mm);
	list_del(&vm->global_link);
}

static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
{
	/* This function is for GTT-related workarounds. It is called on driver
	 * load and after a GPU reset, so you can place workarounds here even
	 * if they get overwritten by a GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk */
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_GEN9_BC(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
	else if (IS_GEN9_LP(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}

static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
			   struct drm_i915_private *dev_priv,
			   struct drm_i915_file_private *file_priv,
			   const char *name)
{
	int ret;

	ret = __hw_ppgtt_init(ppgtt, dev_priv);
	if (ret == 0) {
		kref_init(&ppgtt->ref);
		i915_address_space_init(&ppgtt->base, dev_priv, name);
		ppgtt->base.file = file_priv;
	}

	return ret;
}

int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
	gtt_write_workarounds(dev_priv);

	/* In the case of execlists, PPGTT is enabled by the context descriptor
	 * and the PDPs are contained within the context itself.  We don't
	 * need to do anything here. */
	if (i915.enable_execlists)
		return 0;

	if (!USES_PPGTT(dev_priv))
		return 0;

	if (IS_GEN6(dev_priv))
		gen6_ppgtt_enable(dev_priv);
	else if (IS_GEN7(dev_priv))
		gen7_ppgtt_enable(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_enable(dev_priv);
	else
		MISSING_CASE(INTEL_GEN(dev_priv));

	return 0;
}

struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_i915_private *dev_priv,
		  struct drm_i915_file_private *fpriv,
		  const char *name)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv, name);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	trace_i915_ppgtt_create(&ppgtt->base);

	return ppgtt;
}

void i915_ppgtt_close(struct i915_address_space *vm)
{
	struct list_head *phases[] = {
		&vm->active_list,
		&vm->inactive_list,
		&vm->unbound_list,
		NULL,
	}, **phase;

	GEM_BUG_ON(vm->closed);
	vm->closed = true;

	for (phase = phases; *phase; phase++) {
		struct i915_vma *vma, *vn;

		list_for_each_entry_safe(vma, vn, *phase, vm_link)
			if (!i915_vma_is_closed(vma))
				i915_vma_close(vma);
	}
}

void i915_ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	trace_i915_ppgtt_release(&ppgtt->base);

	/* vmas should already be unbound and destroyed */
	WARN_ON(!list_empty(&ppgtt->base.active_list));
	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
	WARN_ON(!list_empty(&ppgtt->base.unbound_list));

	i915_address_space_fini(&ppgtt->base);

	ppgtt->base.cleanup(&ppgtt->base);
	kfree(ppgtt);
}

/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (INTEL_INFO(dev_priv)->gen < 6)
		return;

	for_each_engine(engine, dev_priv, id) {
		u32 fault_reg;
		fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (fault_reg & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault_reg & PAGE_MASK,
					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault_reg),
					 RING_FAULT_FAULT_TYPE(fault_reg));
			I915_WRITE(RING_FAULT_REG(engine),
				   fault_reg & ~RING_FAULT_VALID);
		}
	}

	/* Engine specific init may not have been done till this point. */
	if (dev_priv->engine[RCS])
		POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}

void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	/* Don't bother messing with faults pre GEN6 as we have little
	 * documentation supporting that it's a good idea.
	 */
	if (INTEL_GEN(dev_priv) < 6)
		return;

	i915_check_and_clear_faults(dev_priv);

	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);

	i915_ggtt_invalidate(dev_priv);
}

int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	do {
		if (dma_map_sg(&obj->base.dev->pdev->dev,
			       pages->sgl, pages->nents,
			       PCI_DMA_BIDIRECTIONAL))
			return 0;

		/* If the DMA remap fails, one cause can be that we have
		 * too many objects pinned in a small remapping table,
		 * such as swiotlb. Incrementally purge all other objects and
		 * try again - if there are no more pages to remove from
		 * the DMA remapper, i915_gem_shrink will return 0.
		 */
		GEM_BUG_ON(obj->mm.pages == pages);
	} while (i915_gem_shrink(to_i915(obj->base.dev),
				 obj->base.size >> PAGE_SHIFT,
				 I915_SHRINK_BOUND |
				 I915_SHRINK_UNBOUND |
				 I915_SHRINK_ACTIVE));

	return -ENOSPC;
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  uint64_t offset,
				  enum i915_cache_level level,
				  u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);

	gen8_set_pte(pte, gen8_pte_encode(addr, level));

	ggtt->invalidate(vm->i915);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct sgt_iter sgt_iter;
	gen8_pte_t __iomem *gtt_entries;
	gen8_pte_t gtt_entry;
	dma_addr_t addr;
	int i = 0;

	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);

	for_each_sgt_dma(addr, sgt_iter, st) {
		gtt_entry = gen8_pte_encode(addr, level);
		gen8_set_pte(&gtt_entries[i++], gtt_entry);
	}

	/*
	 * XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	ggtt->invalidate(vm->i915);
}

struct insert_entries {
	struct i915_address_space *vm;
	struct sg_table *st;
	uint64_t start;
	enum i915_cache_level level;
	u32 flags;
};

static int gen8_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;
	gen8_ggtt_insert_entries(arg->vm, arg->st,
				 arg->start, arg->level, arg->flags);
	return 0;
}

static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					  struct sg_table *st,
					  uint64_t start,
					  enum i915_cache_level level,
					  u32 flags)
{
	struct insert_entries arg = { vm, st, start, level, flags };
	stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
}
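
/*
 * The __BKL variant above funnels the PTE writes through stop_machine(),
 * so no other CPU runs while the GGTT entries are being rewritten; it is
 * only wired up for Cherryview in gen8_gmch_probe(), presumably to avoid
 * concurrent GGTT access racing with these updates on that platform.
 */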

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  uint64_t offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(vm->i915);
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct sgt_iter sgt_iter;
	gen6_pte_t __iomem *gtt_entries;
	gen6_pte_t gtt_entry;
	dma_addr_t addr;
	int i = 0;

	gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);

	for_each_sgt_dma(addr, sgt_iter, st) {
		gtt_entry = vm->pte_encode(addr, level, flags);
		iowrite32(gtt_entry, &gtt_entries[i++]);
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	ggtt->invalidate(vm->i915);
}

static void nop_clear_range(struct i915_address_space *vm,
			    uint64_t start, uint64_t length)
{
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start, uint64_t length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen8_pte_t scratch_pte, __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
				      I915_CACHE_LLC);
	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
	readl(gtt_base);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}

static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  uint64_t offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *pages,
				     uint64_t start,
				     enum i915_cache_level cache_level, u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);

}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length)
{
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}

static int ggtt_bind_vma(struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags = 0;
	int ret;

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	/* Currently applicable only to VLV */
	if (obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	intel_runtime_pm_get(i915);
	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
				cache_level, pte_flags);
	intel_runtime_pm_put(i915);

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	return 0;
}

static int aliasing_gtt_bind_vma(struct i915_vma *vma,
				 enum i915_cache_level cache_level,
				 u32 flags)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	u32 pte_flags;
	int ret;

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;


	if (flags & I915_VMA_GLOBAL_BIND) {
		intel_runtime_pm_get(i915);
		vma->vm->insert_entries(vma->vm,
					vma->pages, vma->node.start,
					cache_level, pte_flags);
		intel_runtime_pm_put(i915);
	}

	if (flags & I915_VMA_LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
		appgtt->base.insert_entries(&appgtt->base,
					    vma->pages, vma->node.start,
					    cache_level, pte_flags);
	}

	return 0;
}

static void ggtt_unbind_vma(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
	const u64 size = min(vma->size, vma->node.size);

	if (vma->flags & I915_VMA_GLOBAL_BIND) {
		intel_runtime_pm_get(i915);
		vma->vm->clear_range(vma->vm,
				     vma->node.start, size);
		intel_runtime_pm_put(i915);
	}

	if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
		appgtt->base.clear_range(&appgtt->base,
					 vma->node.start, size);
}

void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct device *kdev = &dev_priv->drm.pdev->dev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (unlikely(ggtt->do_idle_maps)) {
		if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}

static void i915_gtt_color_adjust(const struct drm_mm_node *node,
				  unsigned long color,
				  u64 *start,
				  u64 *end)
{
	if (node->allocated && node->color != color)
		*start += I915_GTT_PAGE_SIZE;

	/* Also leave a space between the unallocated reserved node after the
	 * GTT and any objects within the GTT, i.e. we use the color adjustment
	 * to insert a guard page to prevent prefetches crossing over the
	 * GTT boundary.
	 */
	node = list_next_entry(node, node_list);
	if (node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}
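
/*
 * Example of the adjustment above (assuming I915_GTT_PAGE_SIZE == 4096):
 * for a hole [0x10000, 0x20000) whose neighbouring nodes both carry a
 * different colour, the usable range shrinks to [0x11000, 0x1f000),
 * leaving a one-page guard on either side.
 */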

int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long hole_start, hole_end;
	struct i915_hw_ppgtt *ppgtt;
	struct drm_mm_node *entry;
	int ret;

	ret = intel_vgt_balloon(dev_priv);
	if (ret)
		return ret;

	/* Reserve a mappable slot for our lockless error capture */
	ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
						  &ggtt->error_capture,
						  PAGE_SIZE, 0,
						  I915_COLOR_UNEVICTABLE,
						  0, ggtt->mappable_end,
						  0, 0);
	if (ret)
		return ret;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt->base.clear_range(&ggtt->base, hole_start,
				       hole_end - hole_start);
	}

	/* And finally clear the reserved guard page */
	ggtt->base.clear_range(&ggtt->base,
			       ggtt->base.total - PAGE_SIZE, PAGE_SIZE);

	if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
		if (!ppgtt) {
			ret = -ENOMEM;
			goto err;
		}

		ret = __hw_ppgtt_init(ppgtt, dev_priv);
		if (ret)
			goto err_ppgtt;

		if (ppgtt->base.allocate_va_range) {
			ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
							    ppgtt->base.total);
			if (ret)
				goto err_ppgtt_cleanup;
		}

		ppgtt->base.clear_range(&ppgtt->base,
					ppgtt->base.start,
					ppgtt->base.total);

		dev_priv->mm.aliasing_ppgtt = ppgtt;
		WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
		ggtt->base.bind_vma = aliasing_gtt_bind_vma;
	}

	return 0;

err_ppgtt_cleanup:
	ppgtt->base.cleanup(&ppgtt->base);
err_ppgtt:
	kfree(ppgtt);
err:
	drm_mm_remove_node(&ggtt->error_capture);
	return ret;
}

/**
 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
 * @dev_priv: i915 device
 */
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
		ppgtt->base.cleanup(&ppgtt->base);
		kfree(ppgtt);
	}

	i915_gem_cleanup_stolen(&dev_priv->drm);

	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);

	if (drm_mm_initialized(&ggtt->base.mm)) {
		intel_vgt_deballoon(dev_priv);

		mutex_lock(&dev_priv->drm.struct_mutex);
		i915_address_space_fini(&ggtt->base);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	ggtt->base.cleanup(&ggtt->base);

	arch_phys_wc_del(ggtt->mtrr);
	io_mapping_fini(&ggtt->mappable);
}

static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}
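
/*
 * Worked example for the decode above: a GGMS field of 2 yields
 * 2 << 20 = 2MiB of GTT; at 4 bytes per gen6 PTE that maps 512K pages,
 * i.e. a 2GiB address space.
 */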

static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}
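
/*
 * Worked example: a gen8 GGMS field of 3 becomes 1 << 3 = 8, i.e. 8MiB of
 * GTT; with 8-byte gen8 PTEs that is 1M entries, a 4GiB address space
 * (see the total calculation in gen8_gmch_probe()).
 */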

static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
	return bdw_gmch_ctl << 25; /* 32 MB units */
}

static size_t chv_get_stolen_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;

	/*
	 * 0x0  to 0x10: 32MB increments starting at 0MB
	 * 0x11 to 0x16: 4MB increments starting at 8MB
	 * 0x17 to 0x1d: 4MB increments start at 36MB
	 */
	if (gmch_ctrl < 0x11)
		return gmch_ctrl << 25;
	else if (gmch_ctrl < 0x17)
		return (gmch_ctrl - 0x11 + 2) << 22;
	else
		return (gmch_ctrl - 0x17 + 9) << 22;
}
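
/*
 * Example decode for the table above: gmch_ctrl = 0x11 gives
 * (0x11 - 0x11 + 2) << 22 = 8MiB and gmch_ctrl = 0x17 gives
 * (0x17 - 0x17 + 9) << 22 = 36MiB, matching the 4MiB-increment ranges
 * described in the comment.
 */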

static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
{
	gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;

	if (gen9_gmch_ctl < 0xf0)
		return gen9_gmch_ctl << 25; /* 32 MB units */
	else
		/* 4MB increments starting at 0xf0 for 4MB */
		return (gen9_gmch_ctl - 0xf0 + 1) << 22;
}
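
/*
 * Example: a field value of 0x10 decodes to 0x10 << 25 = 512MiB of stolen
 * memory, while 0xf1 decodes to (0xf1 - 0xf0 + 1) << 22 = 8MiB.
 */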

static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	phys_addr_t phys_addr;
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;

	/*
	 * On BXT writes larger than 64 bit to the GTT pagetable range will be
	 * dropped. For WC mappings in general we have 64 byte burst writes
	 * when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_GEN9_LP(dev_priv))
		ggtt->gsm = ioremap_nocache(phys_addr, size);
	else
		ggtt->gsm = ioremap_wc(phys_addr, size);
	if (!ggtt->gsm) {
		DRM_ERROR("Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev_priv, &ggtt->base.scratch_page, GFP_DMA32);
	if (ret) {
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	return 0;
}

/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases. */
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	if (!USES_PPGTT(dev_priv))
		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
		 * so RTL will always use the value corresponding to
		 * pat_sel = 000".
		 * So let's disable cache for GGTT to avoid screen corruptions.
		 * MOCS still can be used though.
		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
		 * before this patch, i.e. the same uncached + snooping access
		 * like on gen6/7 seems to be in effect.
		 * - So this just fixes blitter/render access. Again it looks
		 * like it's not just uncached access, but uncached + snooping.
		 * So we can still hold onto all our assumptions wrt cpu
		 * clflushing on LLC machines.
		 */
		pat = GEN8_PPAT(0, GEN8_PPAT_UC);

	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
	 * write would work. */
	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}

static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */
	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	cleanup_scratch_page(vm->i915, &vm->scratch_page);
}

static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	ggtt->mappable_base = pci_resource_start(pdev, 2);
	ggtt->mappable_end = pci_resource_len(pdev, 2);

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39)))
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	if (INTEL_GEN(dev_priv) >= 9) {
		ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
		size = gen8_get_total_gtt_size(snb_gmch_ctl);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	} else {
		ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
		size = gen8_get_total_gtt_size(snb_gmch_ctl);
	}

	ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
		chv_setup_private_ppat(dev_priv);
	else
		bdw_setup_private_ppat(dev_priv);

	ggtt->base.cleanup = gen6_gmch_remove;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.insert_page = gen8_ggtt_insert_page;
	ggtt->base.clear_range = nop_clear_range;
	if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
		ggtt->base.clear_range = gen8_ggtt_clear_range;

	ggtt->base.insert_entries = gen8_ggtt_insert_entries;
	if (IS_CHERRYVIEW(dev_priv))
		ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;

	ggtt->invalidate = gen6_ggtt_invalidate;

	return ggtt_probe_common(ggtt, size);
}

static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;

	ggtt->mappable_base = pci_resource_start(pdev, 2);
	ggtt->mappable_end = pci_resource_len(pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
		DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;

	ggtt->base.clear_range = gen6_ggtt_clear_range;
	ggtt->base.insert_page = gen6_ggtt_insert_page;
	ggtt->base.insert_entries = gen6_ggtt_insert_entries;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.cleanup = gen6_gmch_remove;

	ggtt->invalidate = gen6_ggtt_invalidate;

	if (HAS_EDRAM(dev_priv))
		ggtt->base.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(dev_priv))
		ggtt->base.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(dev_priv))
		ggtt->base.pte_encode = byt_pte_encode;
	else if (INTEL_GEN(dev_priv) >= 7)
		ggtt->base.pte_encode = ivb_pte_encode;
	else
		ggtt->base.pte_encode = snb_pte_encode;

	return ggtt_probe_common(ggtt, size);
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->base.total,
		      &ggtt->stolen_size,
		      &ggtt->mappable_base,
		      &ggtt->mappable_end);

	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
	ggtt->base.insert_page = i915_ggtt_insert_page;
	ggtt->base.insert_entries = i915_ggtt_insert_entries;
	ggtt->base.clear_range = i915_ggtt_clear_range;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.cleanup = i915_gmch_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	if (unlikely(ggtt->do_idle_maps))
		DRM_INFO("applying Ironlake quirks for intel_iommu\n");

	return 0;
}

/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @dev_priv: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	ggtt->base.i915 = dev_priv;

	if (INTEL_GEN(dev_priv) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (INTEL_GEN(dev_priv) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
	if (ret)
		return ret;

	/* Trim the GGTT to fit the GuC mappable upper range (when enabled).
	 * This is easier than doing range restriction on the fly, as we
	 * currently don't have any bits spare to pass in this upper
	 * restriction!
	 */
	if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
		ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
	}

	if ((ggtt->base.total - 1) >> 32) {
		DRM_ERROR("We never expected a Global GTT with more than 32bits"
			  " of address space! Found %lldM!\n",
			  ggtt->base.total >> 20);
		ggtt->base.total = 1ULL << 32;
		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
	}

	if (ggtt->mappable_end > ggtt->base.total) {
		DRM_ERROR("mappable aperture extends past end of GGTT,"
			  " aperture=%llx, total=%llx\n",
			  ggtt->mappable_end, ggtt->base.total);
		ggtt->mappable_end = ggtt->base.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %lluM\n",
		 ggtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		DRM_INFO("VT-d active for gfx access\n");
#endif

	return 0;
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @dev_priv: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	INIT_LIST_HEAD(&dev_priv->vm_list);

	/* Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_address_space_init(&ggtt->base, dev_priv, "[global]");
	if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
	mutex_unlock(&dev_priv->drm.struct_mutex);

	if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
				dev_priv->ggtt.mappable_base,
				dev_priv->ggtt.mappable_end)) {
		ret = -EIO;
		goto out_gtt_cleanup;
	}

	ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev_priv);
	if (ret)
		goto out_gtt_cleanup;

	return 0;

out_gtt_cleanup:
	ggtt->base.cleanup(&ggtt->base);
	return ret;
}

int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
		return -EIO;

	return 0;
}

void i915_ggtt_enable_guc(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate = guc_ggtt_invalidate;
}

void i915_ggtt_disable_guc(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate = gen6_ggtt_invalidate;
}

void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj, *on;

	i915_check_and_clear_faults(dev_priv);

	/* First fill our portion of the GTT with scratch pages */
	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);

	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.bound_list, global_link) {
		bool ggtt_bound = false;
		struct i915_vma *vma;

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (vma->vm != &ggtt->base)
				continue;

			if (!i915_vma_unbind(vma))
				continue;

			WARN_ON(i915_vma_bind(vma, obj->cache_level,
					      PIN_UPDATE));
			ggtt_bound = true;
		}

		if (ggtt_bound)
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
	}

	ggtt->base.closed = false;

	if (INTEL_GEN(dev_priv) >= 8) {
		if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
			chv_setup_private_ppat(dev_priv);
		else
			bdw_setup_private_ppat(dev_priv);

		return;
	}

	if (USES_PPGTT(dev_priv)) {
		struct i915_address_space *vm;

		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			/* TODO: Perhaps it shouldn't be gen6 specific */

			struct i915_hw_ppgtt *ppgtt;

			if (i915_is_ggtt(vm))
				ppgtt = dev_priv->mm.aliasing_ppgtt;
			else
				ppgtt = i915_vm_to_ppgtt(vm);

			gen6_write_page_range(dev_priv, &ppgtt->pd,
					      0, ppgtt->base.total);
		}
	}

	i915_ggtt_invalidate(dev_priv);
}

static struct scatterlist *
rotate_pages(const dma_addr_t *in, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		src_idx = stride * (height - 1) + column;
		for (row = 0; row < height; row++) {
			st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, PAGE_SIZE, 0);
			sg_dma_address(sg) = in[offset + src_idx];
			sg_dma_len(sg) = PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= stride;
		}
	}

	return sg;
}
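
/*
 * Example of the walk above: for a 2x2 tile plane (width = height =
 * stride = 2, offset = 0), column 0 emits source pages 2 then 0 and
 * column 1 emits pages 3 then 1, i.e. the rotated mapping reads the
 * source bottom-up, one column at a time.
 */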

static struct sg_table *
intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
			  struct drm_i915_gem_object *obj)
{
	const size_t n_pages = obj->base.size / PAGE_SIZE;
	unsigned int size = intel_rotation_info_size(rot_info);
	struct sgt_iter sgt_iter;
	dma_addr_t dma_addr;
	unsigned long i;
	dma_addr_t *page_addr_list;
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;

	/* Allocate a temporary list of source pages for random access. */
	page_addr_list = drm_malloc_gfp(n_pages,
					sizeof(dma_addr_t),
					GFP_TEMPORARY);
	if (!page_addr_list)
		return ERR_PTR(ret);

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	/* Populate source page list from the object. */
	i = 0;
	for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
		page_addr_list[i++] = dma_addr;

	GEM_BUG_ON(i != n_pages);
	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
		sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].stride, st, sg);
	}

	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);

	drm_free_large(page_addr_list);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	drm_free_large(page_addr_list);

	DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg, *iter;
	unsigned int count = view->partial.size;
	unsigned int offset;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
	GEM_BUG_ON(!iter);

	sg = st->sgl;
	st->nents = 0;
	do {
		unsigned int len;

		len = min(iter->length - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0) {
			sg_mark_end(sg);
			return st;
		}

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret = 0;

	/* The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	if (vma->pages)
		return 0;

	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
		vma->pages = vma->obj->mm.pages;
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		vma->pages =
			intel_rotate_fb_obj_pages(&vma->ggtt_view.rotated,
						  vma->obj);
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
	else
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);

	if (!vma->pages) {
		DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
			  vma->ggtt_view.type);
		ret = -EINVAL;
	} else if (IS_ERR(vma->pages)) {
		ret = PTR_ERR(vma->pages);
		vma->pages = NULL;
		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
			  vma->ggtt_view.type, ret);
	}

	return ret;
}

/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}
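
/*
 * Usage sketch (illustrative only; the size, offset and flags are invented
 * for the example, and "vm" stands for whichever address space the caller
 * manages, e.g. the global GTT): reserve a 64 KiB node at a fixed 1 MiB
 * offset, coloured unevictable because it is not backed by a VMA.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_reserve(vm, &node, SZ_64K, SZ_1M,
 *				   I915_COLOR_UNEVICTABLE, PIN_NONBLOCK);
 *	if (err)
 *		return err;
 *
 * On success the node owns that range until drm_mm_remove_node() frees it.
 */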

static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_long();
		} else {
			addr = get_random_int();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_int();
			}
		}
		div64_u64_rem(addr, range, &addr);
		start += addr;
	}

	return round_up(start, align);
}
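
/*
 * Worked example for random_offset() (numbers invented): with start = 0,
 * end = 1 MiB, len = 64 KiB and align = 4 KiB, the usable range is
 * round_down(1M - 64K, 4K) - round_up(0, 4K) = 960 KiB. A random remainder
 * in [0, 960K) is added to start and the sum rounded up to the next 4 KiB
 * boundary, so any aligned slot whose 64 KiB allocation still ends at or
 * before @end may be returned.
 */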

/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction; failing that, the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	u32 search_flag, alloc_flag;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0  && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	if (flags & PIN_HIGH) {
		search_flag = DRM_MM_SEARCH_BELOW;
		alloc_flag = DRM_MM_CREATE_TOP;
	} else {
		search_flag = DRM_MM_SEARCH_DEFAULT;
		alloc_flag = DRM_MM_CREATE_DEFAULT;
	}

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range_generic(&vm->mm, node,
						  size, alignment, color,
						  start, end,
						  search_flag, alloc_flag);
	if (err != -ENOSPC)
		return err;

	/* No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)             (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
 * simplicity and similarity between different GTTs, try the single
	 * random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	search_flag = DRM_MM_SEARCH_DEFAULT;
	return drm_mm_insert_node_in_range_generic(&vm->mm, node,
						   size, alignment, color,
						   start, end,
						   search_flag, alloc_flag);
}
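
/*
 * Usage sketch (illustrative only; the size, colour and flags are invented,
 * and "vm" stands for whichever address space the caller manages): with
 * struct_mutex held, ask for any 2 MiB hole anywhere in the address space,
 * letting the function fall back to random replacement and then eviction if
 * no free hole is found.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_insert(vm, &node, SZ_2M, 0,
 *				  I915_COLOR_UNEVICTABLE,
 *				  0, U64_MAX, PIN_NONBLOCK);
 *	if (err)
 *		return err;
 *
 * Passing @alignment == 0 means the minimum GTT alignment is used; any
 * non-zero value must be a power of two of at least I915_GTT_MIN_ALIGNMENT.
 */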