/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view
 * are different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate on
 * the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_ggtt_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_ggtt_vma_pages function. This table is stored in the vma.ggtt_view
 * and exists for the lifetime of a VMA.
 *
 * Core API is designed to have copy semantics which means that passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

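/*
 * Illustrative note (not part of the original file): a display path that
 * needs the rotated presentation of an object described above would look up
 * or pin its VMA with the &i915_ggtt_view_rotated descriptor declared below,
 * while GPU/CPU rendering keeps using &i915_ggtt_view_normal; the exact
 * lookup/pin helpers vary between kernel versions.
 */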
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

const struct i915_ggtt_view i915_ggtt_view_normal = {
	.type = I915_GGTT_VIEW_NORMAL,
};
const struct i915_ggtt_view i915_ggtt_view_rotated = {
	.type = I915_GGTT_VIEW_ROTATED,
};

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/* Note that as an uncached mmio write, this should flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

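/*
 * Sanitize the enable_ppgtt module parameter against what the hardware and
 * the current configuration actually support. The value returned is the
 * effective mode: 0 = PPGTT disabled, 1 = aliasing PPGTT, 2 = full PPGTT,
 * 3 = full 48-bit PPGTT.
 */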
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
			       	int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

	if (intel_vgpu_active(dev_priv)) {
		/* emulation is too hard */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = false;
	}

	if (!has_aliasing_ppgtt)
		return 0;

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}
#endif

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
		return has_full_48bit_ppgtt ? 3 : 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags = 0;

	vma->pages = vma->obj->mm.pages;

	/* Currently applicable only to VLV */
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
				cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm,
			     vma->node.start,
			     vma->size);
}

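/*
 * Encode a gen8+ PTE: mark the page present and writable and select the
 * PPAT index matching the requested cache level.
 */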
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level)
{
	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

static int __setup_page_dma(struct drm_i915_private *dev_priv,
			    struct i915_page_dma *p, gfp_t flags)
{
	struct device *kdev = &dev_priv->drm.pdev->dev;

	p->page = alloc_page(flags);
	if (!p->page)
		return -ENOMEM;

	p->daddr = dma_map_page(kdev,
				p->page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	if (dma_mapping_error(kdev, p->daddr)) {
		__free_page(p->page);
		return -EINVAL;
	}

	return 0;
}

static int setup_page_dma(struct drm_i915_private *dev_priv,
			  struct i915_page_dma *p)
{
	return __setup_page_dma(dev_priv, p, I915_GFP_DMA);
}

static void cleanup_page_dma(struct drm_i915_private *dev_priv,
			     struct i915_page_dma *p)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (WARN_ON(!p->page))
		return;

	dma_unmap_page(&pdev->dev, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(p->page);
	memset(p, 0, sizeof(*p));
}

static void *kmap_page_dma(struct i915_page_dma *p)
{
	return kmap_atomic(p->page);
}

/* We use the flushing unmap only with ppgtt structures:
 * page directories, page tables and scratch pages.
 */
static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
{
	/* There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter, so play safe for now.
	 */
	if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
		drm_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

#define kmap_px(px) kmap_page_dma(px_base(px))
#define kunmap_px(ppgtt, vaddr) \
		kunmap_page_dma((ppgtt)->base.i915, (vaddr))

#define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px))
#define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px))
#define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v))
#define fill32_px(dev_priv, px, v) \
		fill_page_dma_32((dev_priv), px_base(px), (v))

static void fill_page_dma(struct drm_i915_private *dev_priv,
			  struct i915_page_dma *p, const uint64_t val)
{
	int i;
	uint64_t * const vaddr = kmap_page_dma(p);

	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_page_dma(dev_priv, vaddr);
}

static void fill_page_dma_32(struct drm_i915_private *dev_priv,
			     struct i915_page_dma *p, const uint32_t val32)
{
	uint64_t v = val32;

	v = v << 32 | val32;

	fill_page_dma(dev_priv, p, v);
}

static int
setup_scratch_page(struct drm_i915_private *dev_priv,
		   struct i915_page_dma *scratch,
		   gfp_t gfp)
{
	return __setup_page_dma(dev_priv, scratch, gfp | __GFP_ZERO);
}

static void cleanup_scratch_page(struct drm_i915_private *dev_priv,
				 struct i915_page_dma *scratch)
{
	cleanup_page_dma(dev_priv, scratch);
}

static struct i915_page_table *alloc_pt(struct drm_i915_private *dev_priv)
{
	struct i915_page_table *pt;
	const size_t count = INTEL_GEN(dev_priv) >= 8 ? GEN8_PTES : GEN6_PTES;
	int ret = -ENOMEM;

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return ERR_PTR(-ENOMEM);

	pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
				GFP_KERNEL);

	if (!pt->used_ptes)
		goto fail_bitmap;

	ret = setup_px(dev_priv, pt);
	if (ret)
		goto fail_page_m;

	return pt;

fail_page_m:
	kfree(pt->used_ptes);
fail_bitmap:
	kfree(pt);

	return ERR_PTR(ret);
}

static void free_pt(struct drm_i915_private *dev_priv,
		    struct i915_page_table *pt)
{
	cleanup_px(dev_priv, pt);
	kfree(pt->used_ptes);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen8_pte_t scratch_pte;

	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
				      I915_CACHE_LLC);

	fill_px(vm->i915, pt, scratch_pte);
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen6_pte_t scratch_pte;

	WARN_ON(vm->scratch_page.daddr == 0);

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	fill32_px(vm->i915, pt, scratch_pte);
}

static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv)
{
	struct i915_page_directory *pd;
	int ret = -ENOMEM;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
				sizeof(*pd->used_pdes), GFP_KERNEL);
	if (!pd->used_pdes)
		goto fail_bitmap;

	ret = setup_px(dev_priv, pd);
	if (ret)
		goto fail_page_m;

	return pd;

fail_page_m:
	kfree(pd->used_pdes);
fail_bitmap:
	kfree(pd);

	return ERR_PTR(ret);
}

static void free_pd(struct drm_i915_private *dev_priv,
		    struct i915_page_directory *pd)
{
	if (px_page(pd)) {
		cleanup_px(dev_priv, pd);
		kfree(pd->used_pdes);
		kfree(pd);
	}
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	gen8_pde_t scratch_pde;

	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);

	fill_px(vm->i915, pd, scratch_pde);
}

static int __pdp_init(struct drm_i915_private *dev_priv,
		      struct i915_page_directory_pointer *pdp)
{
	size_t pdpes = I915_PDPES_PER_PDP(dev_priv);

	pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
				  sizeof(unsigned long),
				  GFP_KERNEL);
	if (!pdp->used_pdpes)
		return -ENOMEM;

	pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
				      GFP_KERNEL);
	if (!pdp->page_directory) {
		kfree(pdp->used_pdpes);
		/* the PDP might be the statically allocated top level. Keep it
		 * as clean as possible */
		pdp->used_pdpes = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->used_pdpes);
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static struct
i915_page_directory_pointer *alloc_pdp(struct drm_i915_private *dev_priv)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!USES_FULL_48BIT_PPGTT(dev_priv));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(dev_priv, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(dev_priv, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct drm_i915_private *dev_priv,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);
	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
		cleanup_px(dev_priv, pdp);
		kfree(pdp);
	}
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm->i915, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	gen8_ppgtt_pml4e_t scratch_pml4e;

	scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
					  I915_CACHE_LLC);

	fill_px(vm->i915, pml4, scratch_pml4e);
}

static void
gen8_setup_pdpe(struct i915_hw_ppgtt *ppgtt,
		struct i915_page_directory_pointer *pdp,
		struct i915_page_directory *pd,
		int index)
{
	gen8_ppgtt_pdpe_t *page_directorypo;

	if (!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)))
		return;

	page_directorypo = kmap_px(pdp);
	page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_px(ppgtt, page_directorypo);
}

static void
gen8_setup_pml4e(struct i915_hw_ppgtt *ppgtt,
		 struct i915_pml4 *pml4,
		 struct i915_page_directory_pointer *pdp,
		 int index)
{
	gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);

	WARN_ON(!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)));
	pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_px(ppgtt, pagemap);
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
{
	struct intel_ring *ring = req->ring;
	struct intel_engine_cs *engine = req->engine;
	int ret;

	BUG_ON(entry >= 4);

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
	intel_ring_emit(ring, upper_32_bits(addr));
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
	intel_ring_emit(ring, lower_32_bits(addr));
	intel_ring_advance(ring);

	return 0;
}

static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
				 struct drm_i915_gem_request *req)
{
	int i, ret;

	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		ret = gen8_write_pdp(req, i, pd_daddr);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
}

/* Removes entries from a single page table, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
				struct i915_page_table *pt,
				uint64_t start,
				uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned int num_entries = gen8_pte_count(start, length);
	unsigned int pte = gen8_pte_index(start);
	unsigned int pte_end = pte + num_entries;
	gen8_pte_t *pt_vaddr;
	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
						 I915_CACHE_LLC);

	if (WARN_ON(!px_page(pt)))
		return false;

	GEM_BUG_ON(pte_end > GEN8_PTES);

	bitmap_clear(pt->used_ptes, pte, num_entries);

	if (bitmap_empty(pt->used_ptes, GEN8_PTES))
		return true;

	pt_vaddr = kmap_px(pt);

	while (pte < pte_end)
		pt_vaddr[pte++] = scratch_pte;

	kunmap_px(ppgtt, pt_vaddr);

	return false;
}

/* Removes entries from a single page dir, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries
 */
static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
				struct i915_page_directory *pd,
				uint64_t start,
				uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_table *pt;
	uint64_t pde;
	gen8_pde_t *pde_vaddr;
	gen8_pde_t scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt),
						 I915_CACHE_LLC);

	gen8_for_each_pde(pt, pd, start, length, pde) {
		if (WARN_ON(!pd->page_table[pde]))
			break;

		if (gen8_ppgtt_clear_pt(vm, pt, start, length)) {
			__clear_bit(pde, pd->used_pdes);
			pde_vaddr = kmap_px(pd);
			pde_vaddr[pde] = scratch_pde;
			kunmap_px(ppgtt, pde_vaddr);
			free_pt(vm->i915, pt);
		}
	}

	if (bitmap_empty(pd->used_pdes, I915_PDES))
		return true;

	return false;
}

/* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries
 */
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
				 struct i915_page_directory_pointer *pdp,
				 uint64_t start,
				 uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory *pd;
	uint64_t pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (WARN_ON(!pdp->page_directory[pdpe]))
			break;

		if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
			__clear_bit(pdpe, pdp->used_pdpes);
			gen8_setup_pdpe(ppgtt, pdp, vm->scratch_pd, pdpe);
			free_pd(vm->i915, pd);
		}
	}

	mark_tlbs_dirty(ppgtt);

	if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(vm->i915)))
		return true;

	return false;
}

/* Removes entries from a single pml4.
 * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries are always scratch pml4e.
 */
static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
				  struct i915_pml4 *pml4,
				  uint64_t start,
				  uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory_pointer *pdp;
	uint64_t pml4e;

	GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->i915));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (WARN_ON(!pml4->pdps[pml4e]))
			break;

		if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
			__clear_bit(pml4e, pml4->used_pml4es);
			gen8_setup_pml4e(ppgtt, pml4, vm->scratch_pdp, pml4e);
			free_pdp(vm->i915, pdp);
		}
	}
}

static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start, uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (USES_FULL_48BIT_PPGTT(vm->i915))
		gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
	else
		gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
}

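/*
 * Write PTEs for the pages described by @sg_iter into the page tables under
 * @pdp, starting at GTT offset @start. The page directories and page tables
 * covering the range must already have been allocated.
 */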
static void
gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
			      struct i915_page_directory_pointer *pdp,
			      struct sg_page_iter *sg_iter,
			      uint64_t start,
			      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	gen8_pte_t *pt_vaddr;
	unsigned pdpe = gen8_pdpe_index(start);
	unsigned pde = gen8_pde_index(start);
	unsigned pte = gen8_pte_index(start);

	pt_vaddr = NULL;

	while (__sg_page_iter_next(sg_iter)) {
		if (pt_vaddr == NULL) {
			struct i915_page_directory *pd = pdp->page_directory[pdpe];
			struct i915_page_table *pt = pd->page_table[pde];
			pt_vaddr = kmap_px(pt);
		}

		pt_vaddr[pte] =
			gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
					cache_level);
		if (++pte == GEN8_PTES) {
			kunmap_px(ppgtt, pt_vaddr);
			pt_vaddr = NULL;
			if (++pde == I915_PDES) {
				if (++pdpe == I915_PDPES_PER_PDP(vm->i915))
					break;
				pde = 0;
			}
			pte = 0;
		}
	}

	if (pt_vaddr)
		kunmap_px(ppgtt, pt_vaddr);
}

static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level,
				      u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sg_page_iter sg_iter;

	__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);

	if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
		gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
					      cache_level);
	} else {
		struct i915_page_directory_pointer *pdp;
		uint64_t pml4e;
		uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;

		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
			gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
						      start, cache_level);
		}
	}
}

static void gen8_free_page_tables(struct drm_i915_private *dev_priv,
				  struct i915_page_directory *pd)
{
	int i;

	if (!px_page(pd))
		return;

	for_each_set_bit(i, pd->used_pdes, I915_PDES) {
		if (WARN_ON(!pd->page_table[i]))
			continue;

		free_pt(dev_priv, pd->page_table[i]);
		pd->page_table[i] = NULL;
	}
}

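/*
 * Allocate and initialise the scratch page, page table and page directory
 * (plus a scratch pdp for 48-bit PPGTT) that unused GTT entries point at.
 */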
static int gen8_init_scratch(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	int ret;

	ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(dev_priv);
	if (IS_ERR(vm->scratch_pt)) {
		ret = PTR_ERR(vm->scratch_pt);
		goto free_scratch_page;
	}

	vm->scratch_pd = alloc_pd(dev_priv);
	if (IS_ERR(vm->scratch_pd)) {
		ret = PTR_ERR(vm->scratch_pd);
		goto free_pt;
	}

	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
		vm->scratch_pdp = alloc_pdp(dev_priv);
		if (IS_ERR(vm->scratch_pdp)) {
			ret = PTR_ERR(vm->scratch_pdp);
			goto free_pd;
		}
	}

	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
	if (USES_FULL_48BIT_PPGTT(dev_priv))
		gen8_initialize_pdp(vm, vm->scratch_pdp);

	return 0;

free_pd:
	free_pd(dev_priv, vm->scratch_pd);
free_pt:
	free_pt(dev_priv, vm->scratch_pt);
free_scratch_page:
	cleanup_scratch_page(dev_priv, &vm->scratch_page);

	return ret;
}

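/*
 * When running under a mediated vGPU, tell the host which top-level page
 * directories back this ppgtt and whether it is being created or destroyed,
 * so the host can shadow it.
 */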
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	enum vgt_g2v_type msg;
1015
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
1016 1017
	int i;

1018
	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
1019 1020
		u64 daddr = px_dma(&ppgtt->pml4);

1021 1022
		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
1023 1024 1025 1026 1027 1028 1029

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
			u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

1030 1031
			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

1043 1044
static void gen8_free_scratch(struct i915_address_space *vm)
{
1045
	struct drm_i915_private *dev_priv = vm->i915;
1046

1047 1048 1049 1050 1051
	if (USES_FULL_48BIT_PPGTT(dev_priv))
		free_pdp(dev_priv, vm->scratch_pdp);
	free_pd(dev_priv, vm->scratch_pd);
	free_pt(dev_priv, vm->scratch_pt);
	cleanup_scratch_page(dev_priv, &vm->scratch_page);
1052 1053
}

1054
static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv,
1055
				    struct i915_page_directory_pointer *pdp)
1056 1057 1058
{
	int i;

1059
	for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)) {
1060
		if (WARN_ON(!pdp->page_directory[i]))
1061 1062
			continue;

1063 1064
		gen8_free_page_tables(dev_priv, pdp->page_directory[i]);
		free_pd(dev_priv, pdp->page_directory[i]);
1065
	}
1066

1067
	free_pdp(dev_priv, pdp);
1068 1069 1070 1071
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
1072
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
1073 1074 1075 1076 1077 1078
	int i;

	for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
		if (WARN_ON(!ppgtt->pml4.pdps[i]))
			continue;

1079
		gen8_ppgtt_cleanup_3lvl(dev_priv, ppgtt->pml4.pdps[i]);
1080 1081
	}

1082
	cleanup_px(dev_priv, &ppgtt->pml4);
1083 1084 1085 1086
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
1087
	struct drm_i915_private *dev_priv = vm->i915;
1088
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1089

1090
	if (intel_vgpu_active(dev_priv))
1091 1092
		gen8_ppgtt_notify_vgt(ppgtt, false);

1093 1094
	if (!USES_FULL_48BIT_PPGTT(dev_priv))
		gen8_ppgtt_cleanup_3lvl(dev_priv, &ppgtt->pdp);
1095 1096
	else
		gen8_ppgtt_cleanup_4lvl(ppgtt);
1097

1098
	gen8_free_scratch(vm);
1099 1100
}

1101 1102
/**
 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
1103 1104
 * @vm:	Master vm structure.
 * @pd:	Page directory for this address range.
1105
 * @start:	Starting virtual address to begin allocations.
1106
 * @length:	Size of the allocations.
1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118
 * @new_pts:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page tables. Extremely similar to
 * gen8_ppgtt_alloc_page_directories(). The main difference is here we are limited by
 * the page directory boundary (instead of the page directory pointer). That
 * boundary is 1GB virtual. Therefore, unlike gen8_ppgtt_alloc_page_directories(), it is
 * possible, and likely that the caller will need to use multiple calls of this
 * function to achieve the appropriate allocation.
 *
 * Return: 0 if success; negative error code otherwise.
 */
1119
static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
1120
				     struct i915_page_directory *pd,
1121
				     uint64_t start,
1122 1123
				     uint64_t length,
				     unsigned long *new_pts)
1124
{
1125
	struct drm_i915_private *dev_priv = vm->i915;
1126
	struct i915_page_table *pt;
1127
	uint32_t pde;
1128

1129
	gen8_for_each_pde(pt, pd, start, length, pde) {
1130
		/* Don't reallocate page tables */
1131
		if (test_bit(pde, pd->used_pdes)) {
1132
			/* Scratch is never allocated this way */
1133
			WARN_ON(pt == vm->scratch_pt);
1134 1135 1136
			continue;
		}

1137
		pt = alloc_pt(dev_priv);
1138
		if (IS_ERR(pt))
1139 1140
			goto unwind_out;

1141
		gen8_initialize_pt(vm, pt);
1142
		pd->page_table[pde] = pt;
1143
		__set_bit(pde, new_pts);
1144
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
1145 1146
	}

1147
	return 0;
1148 1149

unwind_out:
1150
	for_each_set_bit(pde, new_pts, I915_PDES)
1151
		free_pt(dev_priv, pd->page_table[pde]);
1152

B
Ben Widawsky 已提交
1153
	return -ENOMEM;
1154 1155
}

1156 1157
/**
 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
1158
 * @vm:	Master vm structure.
1159 1160
 * @pdp:	Page directory pointer for this address range.
 * @start:	Starting virtual address to begin allocations.
1161 1162
 * @length:	Size of the allocations.
 * @new_pds:	Bitmap set by function with new allocations. Likely used by the
1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178
 *		caller to free on error.
 *
 * Allocate the required number of page directories starting at the pde index of
 * @start, and ending at the pde index @start + @length. This function will skip
 * over already allocated page directories within the range, and only allocate
 * new ones, setting the appropriate pointer within the pdp as well as the
 * correct position in the bitmap @new_pds.
 *
 * The function will only allocate the pages within the range for a given page
 * directory pointer. In other words, if @start + @length straddles a virtually
 * addressed PDP boundary (512GB for 4k pages), there will be more allocations
 * required by the caller. This is not currently possible, and the BUG in the
 * code will prevent it.
 *
 * Return: 0 if success; negative error code otherwise.
 */
1179 1180 1181 1182 1183 1184
static int
gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
				  struct i915_page_directory_pointer *pdp,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pds)
1185
{
1186
	struct drm_i915_private *dev_priv = vm->i915;
1187
	struct i915_page_directory *pd;
1188
	uint32_t pdpe;
1189
	uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
1190

1191
	WARN_ON(!bitmap_empty(new_pds, pdpes));
1192

1193
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1194
		if (test_bit(pdpe, pdp->used_pdpes))
1195
			continue;
1196

1197
		pd = alloc_pd(dev_priv);
1198
		if (IS_ERR(pd))
B
Ben Widawsky 已提交
1199
			goto unwind_out;
1200

1201
		gen8_initialize_pd(vm, pd);
1202
		pdp->page_directory[pdpe] = pd;
1203
		__set_bit(pdpe, new_pds);
1204
		trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
B
Ben Widawsky 已提交
1205 1206
	}

1207
	return 0;
B
Ben Widawsky 已提交
1208 1209

unwind_out:
1210
	for_each_set_bit(pdpe, new_pds, pdpes)
1211
		free_pd(dev_priv, pdp->page_directory[pdpe]);
B
Ben Widawsky 已提交
1212 1213

	return -ENOMEM;
1214 1215
}

1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238
/**
 * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
 * @vm:	Master vm structure.
 * @pml4:	Page map level 4 for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pdps:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page directory pointers. Extremely similar to
 * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
 * The main difference is here we are limited by the pml4 boundary (instead of
 * the page directory pointer).
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int
gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
				  struct i915_pml4 *pml4,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pdps)
{
1239
	struct drm_i915_private *dev_priv = vm->i915;
1240 1241 1242 1243 1244
	struct i915_page_directory_pointer *pdp;
	uint32_t pml4e;

	WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));

1245
	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1246
		if (!test_bit(pml4e, pml4->used_pml4es)) {
1247
			pdp = alloc_pdp(dev_priv);
1248 1249 1250
			if (IS_ERR(pdp))
				goto unwind_out;

1251
			gen8_initialize_pdp(vm, pdp);
1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264
			pml4->pdps[pml4e] = pdp;
			__set_bit(pml4e, new_pdps);
			trace_i915_page_directory_pointer_entry_alloc(vm,
								      pml4e,
								      start,
								      GEN8_PML4E_SHIFT);
		}
	}

	return 0;

unwind_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1265
		free_pdp(dev_priv, pml4->pdps[pml4e]);
1266 1267 1268 1269

	return -ENOMEM;
}

1270
static void
1271
free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
1272 1273 1274 1275 1276 1277 1278 1279 1280 1281
{
	kfree(new_pts);
	kfree(new_pds);
}

/* Fills in the page directory bitmap, and the array of page tables bitmap. Both
 * of these are based on the number of PDPEs in the system.
 */
static
int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
1282
					 unsigned long **new_pts,
1283
					 uint32_t pdpes)
1284 1285
{
	unsigned long *pds;
1286
	unsigned long *pts;
1287

1288
	pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
1289 1290 1291
	if (!pds)
		return -ENOMEM;

1292 1293 1294 1295
	pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
		      GFP_TEMPORARY);
	if (!pts)
		goto err_out;
1296 1297 1298 1299 1300 1301 1302

	*new_pds = pds;
	*new_pts = pts;

	return 0;

err_out:
1303
	free_gen8_temp_bitmaps(pds, pts);
1304 1305 1306
	return -ENOMEM;
}

1307 1308 1309 1310
static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp,
				    uint64_t start,
				    uint64_t length)
1311
{
1312
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1313
	unsigned long *new_page_dirs, *new_page_tables;
1314
	struct drm_i915_private *dev_priv = vm->i915;
1315
	struct i915_page_directory *pd;
1316 1317
	const uint64_t orig_start = start;
	const uint64_t orig_length = length;
1318
	uint32_t pdpe;
1319
	uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
1320 1321
	int ret;

1322
	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
1323 1324 1325
	if (ret)
		return ret;

1326
	/* Do the allocations first so we can easily bail out */
1327 1328
	ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
						new_page_dirs);
1329
	if (ret) {
1330
		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1331 1332 1333 1334
		return ret;
	}

	/* For every page directory referenced, allocate page tables */
1335
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1336
		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
1337
						new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
1338 1339 1340 1341
		if (ret)
			goto err_out;
	}

1342 1343 1344
	start = orig_start;
	length = orig_length;

1345 1346
	/* Allocations have completed successfully, so set the bitmaps, and do
	 * the mappings. */
1347
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1348
		gen8_pde_t *const page_directory = kmap_px(pd);
1349
		struct i915_page_table *pt;
1350
		uint64_t pd_len = length;
1351 1352 1353
		uint64_t pd_start = start;
		uint32_t pde;

1354 1355 1356
		/* Every pd should be allocated, we just did that above. */
		WARN_ON(!pd);

1357
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368
			/* Same reasoning as pd */
			WARN_ON(!pt);
			WARN_ON(!pd_len);
			WARN_ON(!gen8_pte_count(pd_start, pd_len));

			/* Set our used ptes within the page table */
			bitmap_set(pt->used_ptes,
				   gen8_pte_index(pd_start),
				   gen8_pte_count(pd_start, pd_len));

			/* Our pde is now pointing to the pagetable, pt */
1369
			__set_bit(pde, pd->used_pdes);
1370 1371

			/* Map the PDE to the page table */
1372 1373
			page_directory[pde] = gen8_pde_encode(px_dma(pt),
							      I915_CACHE_LLC);
1374 1375 1376 1377
			trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
							gen8_pte_index(start),
							gen8_pte_count(start, length),
							GEN8_PTES);
1378 1379 1380

			/* NB: We haven't yet mapped ptes to pages. At this
			 * point we're still relying on insert_entries() */
1381
		}
1382

1383
		kunmap_px(ppgtt, page_directory);
1384
		__set_bit(pdpe, pdp->used_pdpes);
1385
		gen8_setup_pdpe(ppgtt, pdp, pd, pdpe);
1386 1387
	}

1388
	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1389
	mark_tlbs_dirty(ppgtt);
B
Ben Widawsky 已提交
1390
	return 0;
1391

B
Ben Widawsky 已提交
1392
err_out:
1393
	while (pdpe--) {
1394 1395
		unsigned long temp;

1396 1397
		for_each_set_bit(temp, new_page_tables + pdpe *
				BITS_TO_LONGS(I915_PDES), I915_PDES)
1398 1399
			free_pt(dev_priv,
				pdp->page_directory[pdpe]->page_table[temp]);
1400 1401
	}

1402
	for_each_set_bit(pdpe, new_page_dirs, pdpes)
1403
		free_pd(dev_priv, pdp->page_directory[pdpe]);
1404

1405
	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1406
	mark_tlbs_dirty(ppgtt);
1407 1408 1409
	return ret;
}

1410 1411 1412 1413 1414 1415
static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
				    struct i915_pml4 *pml4,
				    uint64_t start,
				    uint64_t length)
{
	DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
1416
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1417
	struct i915_page_directory_pointer *pdp;
1418
	uint64_t pml4e;
1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436
	int ret = 0;

	/* Do the pml4 allocations first, so we don't need to track the newly
	 * allocated tables below the pdp */
	bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);

	/* The pagedirectory and pagetable allocations are done in the shared 3
	 * and 4 level code. Just allocate the pdps.
	 */
	ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
						new_pdps);
	if (ret)
		return ret;

	WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
	     "The allocation has spanned more than 512GB. "
	     "It is highly likely this is incorrect.");

1437
	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1438 1439 1440 1441 1442 1443
		WARN_ON(!pdp);

		ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
		if (ret)
			goto err_out;

1444
		gen8_setup_pml4e(ppgtt, pml4, pdp, pml4e);
1445 1446 1447 1448 1449 1450 1451 1452 1453
	}

	bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
		  GEN8_PML4ES_PER_PML4);

	return 0;

err_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1454
		gen8_ppgtt_cleanup_3lvl(vm->i915, pml4->pdps[pml4e]);
1455 1456 1457 1458 1459 1460 1461

	return ret;
}

static int gen8_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start, uint64_t length)
{
1462
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1463

1464
	if (USES_FULL_48BIT_PPGTT(vm->i915))
1465 1466 1467 1468 1469
		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
	else
		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
}

1470 1471 1472 1473 1474 1475 1476 1477
static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
			  uint64_t start, uint64_t length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_page_directory *pd;
	uint32_t pdpe;

1478
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1479 1480 1481 1482 1483 1484 1485 1486 1487
		struct i915_page_table *pt;
		uint64_t pd_len = length;
		uint64_t pd_start = start;
		uint32_t pde;

		if (!test_bit(pdpe, pdp->used_pdpes))
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
1488
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531
			uint32_t  pte;
			gen8_pte_t *pt_vaddr;

			if (!test_bit(pde, pd->used_pdes))
				continue;

			pt_vaddr = kmap_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				uint64_t va =
					(pdpe << GEN8_PDPE_SHIFT) |
					(pde << GEN8_PDE_SHIFT) |
					(pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, "  SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			/* don't use kunmap_px, it could trigger
			 * an unnecessary flush.
			 */
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	uint64_t start = ppgtt->base.start;
	uint64_t length = ppgtt->base.total;
1532
	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
1533
						 I915_CACHE_LLC);
1534

1535
	if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
1536 1537
		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
	} else {
1538
		uint64_t pml4e;
1539 1540 1541
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

1542
		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1543 1544 1545 1546 1547 1548 1549 1550 1551
			if (!test_bit(pml4e, pml4->used_pml4es))
				continue;

			seq_printf(m, "    PML4E #%llu\n", pml4e);
			gen8_dump_pdp(pdp, start, length, scratch_pte, m);
		}
	}
}

1552 1553
static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
1554
	unsigned long *new_page_dirs, *new_page_tables;
1555
	uint32_t pdpes = I915_PDPES_PER_PDP(to_i915(ppgtt->base.dev));
1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573
	int ret;

	/* We allocate a temp bitmap for page tables for no gain
	 * but as this is for init only, let's keep things simple
	 */
	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Allocate for all pdps regardless of how the ppgtt
	 * was defined.
	 */
	ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
						0, 1ULL << 32,
						new_page_dirs);
	if (!ret)
		*ppgtt->pdp.used_pdpes = *new_page_dirs;

1574
	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1575 1576 1577 1578

	return ret;
}

1579
/*
1580 1581 1582 1583
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
B
Ben Widawsky 已提交
1584
 *
1585
 */
1586
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
B
Ben Widawsky 已提交
1587
{
1588
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
1589
	int ret;
1590

1591 1592 1593
	ret = gen8_init_scratch(&ppgtt->base);
	if (ret)
		return ret;
1594

1595 1596
	ppgtt->base.start = 0;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1597
	ppgtt->base.allocate_va_range = gen8_alloc_va_range;
1598
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
1599
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
1600 1601
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
1602
	ppgtt->debug_dump = gen8_dump_ppgtt;
1603

1604 1605
	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
		ret = setup_px(dev_priv, &ppgtt->pml4);
1606 1607
		if (ret)
			goto free_scratch;
1608

1609 1610
		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);

1611
		ppgtt->base.total = 1ULL << 48;
1612
		ppgtt->switch_mm = gen8_48b_mm_switch;
1613
	} else {
1614
		ret = __pdp_init(dev_priv, &ppgtt->pdp);
1615 1616 1617 1618
		if (ret)
			goto free_scratch;

		ppgtt->base.total = 1ULL << 32;
1619
		ppgtt->switch_mm = gen8_legacy_mm_switch;
1620 1621 1622
		trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
							      0, 0,
							      GEN8_PML4E_SHIFT);
1623

1624
		if (intel_vgpu_active(dev_priv)) {
1625 1626 1627 1628
			ret = gen8_preallocate_top_level_pdps(ppgtt);
			if (ret)
				goto free_scratch;
		}
1629
	}
1630

1631
	if (intel_vgpu_active(dev_priv))
1632 1633
		gen8_ppgtt_notify_vgt(ppgtt, true);

1634
	return 0;
1635 1636 1637 1638

free_scratch:
	gen8_free_scratch(&ppgtt->base);
	return ret;
1639 1640
}

B
Ben Widawsky 已提交
1641 1642 1643
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
1644
	struct i915_page_table *unused;
1645
	gen6_pte_t scratch_pte;
B
Ben Widawsky 已提交
1646
	uint32_t pd_entry;
1647
	uint32_t  pte, pde;
1648
	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
B
Ben Widawsky 已提交
1649

1650
	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
1651
				     I915_CACHE_LLC, 0);
B
Ben Widawsky 已提交
1652

1653
	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
B
Ben Widawsky 已提交
1654
		u32 expected;
1655
		gen6_pte_t *pt_vaddr;
1656
		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
1657
		pd_entry = readl(ppgtt->pd_addr + pde);
B
Ben Widawsky 已提交
1658 1659 1660 1661 1662 1663 1664 1665 1666
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

1667 1668
		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);

		for (pte = 0; pte < GEN6_PTES; pte += 4) {
			unsigned long va =
1671
				(pde * PAGE_SIZE * GEN6_PTES) +
B
Ben Widawsky 已提交
1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, "  SCRATCH ");
			}
			seq_puts(m, "\n");
		}
1690
		kunmap_px(ppgtt, pt_vaddr);
B
Ben Widawsky 已提交
1691 1692 1693
	}
}

1694
/* Write pde (index) from the page directory @pd to the page table @pt */
1695 1696
static void gen6_write_pde(struct i915_page_directory *pd,
			    const int pde, struct i915_page_table *pt)
B
Ben Widawsky 已提交
1697
{
1698 1699 1700 1701
	/* Caller needs to make sure the write completes if necessary */
	struct i915_hw_ppgtt *ppgtt =
		container_of(pd, struct i915_hw_ppgtt, pd);
	u32 pd_entry;
B
Ben Widawsky 已提交
1702

1703
	pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
1704
	pd_entry |= GEN6_PDE_VALID;
B
Ben Widawsky 已提交
1705

1706 1707
	writel(pd_entry, ppgtt->pd_addr + pde);
}
B
Ben Widawsky 已提交
1708

1709 1710 1711
/* Write all the page tables found in the ppgtt structure to incrementing page
 * directories. */
static void gen6_write_page_range(struct drm_i915_private *dev_priv,
1712
				  struct i915_page_directory *pd,
1713 1714
				  uint32_t start, uint32_t length)
{
1715
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
1716
	struct i915_page_table *pt;
1717
	uint32_t pde;
1718

1719
	gen6_for_each_pde(pt, pd, start, length, pde)
1720 1721 1722 1723
		gen6_write_pde(pd, pde, pt);

	/* Make sure write is complete before other code can use this page
	 * table. Also require for WC mapped PTEs */
1724
	readl(ggtt->gsm);
}

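/*
 * Encode the page directory's GGTT offset in the form the gen6/7
 * PP_DIR_BASE register expects: the offset is programmed in 64-byte units
 * (see the *_mm_switch() implementations below).
 */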
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
	BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);

	return (ppgtt->pd.base.ggtt_offset / 64) << 16;
}

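/*
 * The *_mm_switch() callbacks point an engine at this ppgtt's page directory
 * by loading PP_DIR_DCLV and PP_DIR_BASE, either from the ring (gen7/hsw) or
 * via direct MMIO writes (gen6).
 */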
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req)
{
1737
	struct intel_ring *ring = req->ring;
1738
	struct intel_engine_cs *engine = req->engine;
1739 1740 1741
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
1742
	ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
1743 1744 1745
	if (ret)
		return ret;

1746
	ret = intel_ring_begin(req, 6);
1747 1748 1749
	if (ret)
		return ret;

1750 1751 1752 1753 1754 1755 1756
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
1757 1758 1759 1760

	return 0;
}

1761
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
1762
			  struct drm_i915_gem_request *req)
1763
{
1764
	struct intel_ring *ring = req->ring;
1765
	struct intel_engine_cs *engine = req->engine;
1766 1767 1768
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
1769
	ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
1770 1771 1772
	if (ret)
		return ret;

1773
	ret = intel_ring_begin(req, 6);
1774 1775 1776
	if (ret)
		return ret;

1777 1778 1779 1780 1781 1782 1783
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
1784

1785
	/* XXX: RCS is the only one to auto invalidate the TLBs? */
1786
	if (engine->id != RCS) {
1787
		ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
1788 1789 1790 1791
		if (ret)
			return ret;
	}

1792 1793 1794
	return 0;
}

1795
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
1796
			  struct drm_i915_gem_request *req)
1797
{
1798
	struct intel_engine_cs *engine = req->engine;
1799
	struct drm_i915_private *dev_priv = req->i915;
1800

1801 1802
	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
1803 1804 1805
	return 0;
}

1806
static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
1807
{
1808
	struct intel_engine_cs *engine;
1809
	enum intel_engine_id id;
B
Ben Widawsky 已提交
1810

1811
	for_each_engine(engine, dev_priv, id) {
1812 1813
		u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
				 GEN8_GFX_PPGTT_48B : 0;
1814
		I915_WRITE(RING_MODE_GEN7(engine),
1815
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
1816 1817
	}
}

static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	uint32_t ecochk, ecobits;
	enum intel_engine_id id;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev_priv)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);

	for_each_engine(engine, dev_priv, id) {
		/* GFX_MODE is per-ring on gen7+ */
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	uint32_t ecochk, gab_ctl, ecobits;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	gen6_pte_t *pt_vaddr, scratch_pte;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned first_pte = first_entry % GEN6_PTES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > GEN6_PTES)
			last_pte = GEN6_PTES;

		pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_px(ppgtt, pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level, u32 flags)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned act_pte = first_entry % GEN6_PTES;
	gen6_pte_t *pt_vaddr = NULL;
	struct sgt_iter sgt_iter;
	dma_addr_t addr;

	for_each_sgt_dma(addr, sgt_iter, pages) {
		if (pt_vaddr == NULL)
			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);

		pt_vaddr[act_pte] =
			vm->pte_encode(addr, cache_level, flags);

		if (++act_pte == GEN6_PTES) {
			kunmap_px(ppgtt, pt_vaddr);
			pt_vaddr = NULL;
			act_pt++;
			act_pte = 0;
		}
	}

	if (pt_vaddr)
		kunmap_px(ppgtt, pt_vaddr);
}

static int gen6_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start_in, uint64_t length_in)
{
	DECLARE_BITMAP(new_page_tables, I915_PDES);
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_table *pt;
	uint32_t start, length, start_save, length_save;
	uint32_t pde;
	int ret;

	start = start_save = start_in;
	length = length_save = length_in;

	bitmap_zero(new_page_tables, I915_PDES);

	/* The allocation is done in two stages so that we can bail out with
	 * minimal amount of pain. The first stage finds new page tables that
	 * need allocation. The second stage marks the PTEs in use within the
	 * page tables.
	 */
	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
		if (pt != vm->scratch_pt) {
			WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
			continue;
		}

		/* We've already allocated a page table */
		WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));

		pt = alloc_pt(dev_priv);
		if (IS_ERR(pt)) {
			ret = PTR_ERR(pt);
			goto unwind_out;
		}

		gen6_initialize_pt(vm, pt);

		ppgtt->pd.page_table[pde] = pt;
		__set_bit(pde, new_page_tables);
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
	}

	start = start_save;
	length = length_save;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
		DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);

		bitmap_zero(tmp_bitmap, GEN6_PTES);
		bitmap_set(tmp_bitmap, gen6_pte_index(start),
			   gen6_pte_count(start, length));

		if (__test_and_clear_bit(pde, new_page_tables))
			gen6_write_pde(&ppgtt->pd, pde, pt);

		trace_i915_page_table_entry_map(vm, pde, pt,
					 gen6_pte_index(start),
					 gen6_pte_count(start, length),
					 GEN6_PTES);
		bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
				GEN6_PTES);
	}

	WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));

	/* Make sure the write is complete before other code can use this page
	 * table. Also required for WC mapped PTEs. */
	readl(ggtt->gsm);

	mark_tlbs_dirty(ppgtt);
	return 0;

unwind_out:
	for_each_set_bit(pde, new_page_tables, I915_PDES) {
		struct i915_page_table *pt = ppgtt->pd.page_table[pde];

		ppgtt->pd.page_table[pde] = vm->scratch_pt;
		free_pt(dev_priv, pt);
	}

	mark_tlbs_dirty(ppgtt);
	return ret;
}

static int gen6_init_scratch(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	int ret;

	ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(dev_priv);
	if (IS_ERR(vm->scratch_pt)) {
		cleanup_scratch_page(dev_priv, &vm->scratch_page);
		return PTR_ERR(vm->scratch_pt);
	}

	gen6_initialize_pt(vm, vm->scratch_pt);

	return 0;
}

static void gen6_free_scratch(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;

	free_pt(dev_priv, vm->scratch_pt);
	cleanup_scratch_page(dev_priv, &vm->scratch_page);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory *pd = &ppgtt->pd;
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_page_table *pt;
	uint32_t pde;

	drm_mm_remove_node(&ppgtt->node);

	gen6_for_all_pdes(pt, pd, pde)
		if (pt != vm->scratch_pt)
			free_pt(dev_priv, pt);

	gen6_free_scratch(vm);
}

static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	BUG_ON(!drm_mm_initialized(&ggtt->base.mm));

	ret = gen6_init_scratch(vm);
	if (ret)
		return ret;

	ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
				  GEN6_PD_SIZE, GEN6_PD_ALIGN,
				  I915_COLOR_UNEVICTABLE,
				  0, ggtt->base.total,
				  PIN_HIGH);
	if (ret)
		goto err_out;

	if (ppgtt->node.start < ggtt->mappable_end)
		DRM_DEBUG("Forced to use aperture for PDEs\n");

	return 0;

err_out:
	gen6_free_scratch(vm);
	return ret;
}

static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
	return gen6_ppgtt_allocate_page_directories(ppgtt);
}

static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
				  uint64_t start, uint64_t length)
{
	struct i915_page_table *unused;
	uint32_t pde;

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	ppgtt->base.pte_encode = ggtt->base.pte_encode;
	if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
		ppgtt->switch_mm = gen6_mm_switch;
	else if (IS_HASWELL(dev_priv))
		ppgtt->switch_mm = hsw_mm_switch;
	else if (IS_GEN7(dev_priv))
		ppgtt->switch_mm = gen7_mm_switch;
	else
		BUG();

	ret = gen6_ppgtt_alloc(ppgtt);
	if (ret)
		return ret;

	ppgtt->base.allocate_va_range = gen6_alloc_va_range;
	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
	ppgtt->debug_dump = gen6_dump_ppgtt;

	ppgtt->pd.base.ggtt_offset =
		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);

	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);

	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);

	gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);

	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
			 ppgtt->node.size >> 20,
			 ppgtt->node.start / PAGE_SIZE);

	DRM_DEBUG("Adding PPGTT at offset %x\n",
		  ppgtt->pd.base.ggtt_offset << 10);

	return 0;
}

static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
			   struct drm_i915_private *dev_priv)
{
	ppgtt->base.i915 = dev_priv;

	if (INTEL_INFO(dev_priv)->gen < 8)
		return gen6_ppgtt_init(ppgtt);
	else
		return gen8_ppgtt_init(ppgtt);
}

static void i915_address_space_init(struct i915_address_space *vm,
				    struct drm_i915_private *dev_priv,
				    const char *name)
{
	i915_gem_timeline_init(dev_priv, &vm->timeline, name);
	drm_mm_init(&vm->mm, vm->start, vm->total);
	INIT_LIST_HEAD(&vm->active_list);
	INIT_LIST_HEAD(&vm->inactive_list);
	INIT_LIST_HEAD(&vm->unbound_list);
	list_add_tail(&vm->global_link, &dev_priv->vm_list);
}

static void i915_address_space_fini(struct i915_address_space *vm)
{
	i915_gem_timeline_fini(&vm->timeline);
	drm_mm_takedown(&vm->mm);
	list_del(&vm->global_link);
}

static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
{
	/* This function is for GTT-related workarounds. It is called on
	 * driver load and after a GPU reset, so you can place workarounds
	 * here even if they get overwritten by a GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_SKYLAKE(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
	else if (IS_BROXTON(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}

static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
			   struct drm_i915_private *dev_priv,
			   struct drm_i915_file_private *file_priv,
			   const char *name)
{
	int ret;

	ret = __hw_ppgtt_init(ppgtt, dev_priv);
	if (ret == 0) {
		kref_init(&ppgtt->ref);
		i915_address_space_init(&ppgtt->base, dev_priv, name);
		ppgtt->base.file = file_priv;
	}

	return ret;
}

int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
	gtt_write_workarounds(dev_priv);

	/* In the case of execlists, PPGTT is enabled by the context descriptor
	 * and the PDPs are contained within the context itself.  We don't
	 * need to do anything here. */
	if (i915.enable_execlists)
		return 0;

	if (!USES_PPGTT(dev_priv))
		return 0;

	if (IS_GEN6(dev_priv))
		gen6_ppgtt_enable(dev_priv);
	else if (IS_GEN7(dev_priv))
		gen7_ppgtt_enable(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_enable(dev_priv);
	else
		MISSING_CASE(INTEL_GEN(dev_priv));

	return 0;
}

struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_i915_private *dev_priv,
		  struct drm_i915_file_private *fpriv,
		  const char *name)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv, name);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	trace_i915_ppgtt_create(&ppgtt->base);

	return ppgtt;
}

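/*
 * Mark the address space as closed and close every VMA still on its
 * active, inactive and unbound lists.
 */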
void i915_ppgtt_close(struct i915_address_space *vm)
{
	struct list_head *phases[] = {
		&vm->active_list,
		&vm->inactive_list,
		&vm->unbound_list,
		NULL,
	}, **phase;

	GEM_BUG_ON(vm->closed);
	vm->closed = true;

	for (phase = phases; *phase; phase++) {
		struct i915_vma *vma, *vn;

		list_for_each_entry_safe(vma, vn, *phase, vm_link)
			if (!i915_vma_is_closed(vma))
				i915_vma_close(vma);
	}
}

void i915_ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	trace_i915_ppgtt_release(&ppgtt->base);

	/* vmas should already be unbound and destroyed */
	WARN_ON(!list_empty(&ppgtt->base.active_list));
	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
	WARN_ON(!list_empty(&ppgtt->base.unbound_list));

	i915_address_space_fini(&ppgtt->base);

	ppgtt->base.cleanup(&ppgtt->base);
	kfree(ppgtt);
}

/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (INTEL_INFO(dev_priv)->gen < 6)
		return;

	for_each_engine(engine, dev_priv, id) {
		u32 fault_reg;
		fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (fault_reg & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault_reg & PAGE_MASK,
					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault_reg),
					 RING_FAULT_FAULT_TYPE(fault_reg));
			I915_WRITE(RING_FAULT_REG(engine),
				   fault_reg & ~RING_FAULT_VALID);
		}
	}

	/* Engine specific init may not have been done till this point. */
	if (dev_priv->engine[RCS])
		POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}

void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	/* Don't bother messing with faults pre GEN6 as we have little
	 * documentation supporting that it's a good idea.
	 */
	if (INTEL_GEN(dev_priv) < 6)
		return;

	i915_check_and_clear_faults(dev_priv);

	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);

	i915_ggtt_invalidate(dev_priv);
}

int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	do {
		if (dma_map_sg(&obj->base.dev->pdev->dev,
			       pages->sgl, pages->nents,
			       PCI_DMA_BIDIRECTIONAL))
			return 0;

		/* If the DMA remap fails, one cause can be that we have
		 * too many objects pinned in a small remapping table,
		 * such as swiotlb. Incrementally purge all other objects and
		 * try again - if there are no more pages to remove from
		 * the DMA remapper, i915_gem_shrink will return 0.
		 */
		GEM_BUG_ON(obj->mm.pages == pages);
	} while (i915_gem_shrink(to_i915(obj->base.dev),
				 obj->base.size >> PAGE_SHIFT,
				 I915_SHRINK_BOUND |
				 I915_SHRINK_UNBOUND |
				 I915_SHRINK_ACTIVE));

	return -ENOSPC;
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  uint64_t offset,
				  enum i915_cache_level level,
				  u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);

	gen8_set_pte(pte, gen8_pte_encode(addr, level));

	ggtt->invalidate(vm->i915);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct sgt_iter sgt_iter;
	gen8_pte_t __iomem *gtt_entries;
	gen8_pte_t gtt_entry;
	dma_addr_t addr;
	int i = 0;

	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);

	for_each_sgt_dma(addr, sgt_iter, st) {
		gtt_entry = gen8_pte_encode(addr, level);
		gen8_set_pte(&gtt_entries[i++], gtt_entry);
	}

	/*
	 * XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	ggtt->invalidate(vm->i915);
}

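/* Wrapper that performs the PTE writes of gen8_ggtt_insert_entries() under
 * stop_machine(), serialising the update against all other CPUs; it is
 * installed for Cherryview in gen8_gmch_probe() below.
 */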
struct insert_entries {
	struct i915_address_space *vm;
	struct sg_table *st;
	uint64_t start;
	enum i915_cache_level level;
	u32 flags;
};

static int gen8_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;
	gen8_ggtt_insert_entries(arg->vm, arg->st,
				 arg->start, arg->level, arg->flags);
	return 0;
}

static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					  struct sg_table *st,
					  uint64_t start,
					  enum i915_cache_level level,
					  u32 flags)
{
	struct insert_entries arg = { vm, st, start, level, flags };
	stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
}

2479 2480 2481 2482 2483 2484
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  uint64_t offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(vm->i915);
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct sgt_iter sgt_iter;
	gen6_pte_t __iomem *gtt_entries;
	gen6_pte_t gtt_entry;
	dma_addr_t addr;
	int i = 0;

	gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);

	for_each_sgt_dma(addr, sgt_iter, st) {
		gtt_entry = vm->pte_encode(addr, level, flags);
		iowrite32(gtt_entry, &gtt_entries[i++]);
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	ggtt->invalidate(vm->i915);
}

static void nop_clear_range(struct i915_address_space *vm,
			    uint64_t start, uint64_t length)
{
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start, uint64_t length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen8_pte_t scratch_pte, __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
				      I915_CACHE_LLC);
	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
	readl(gtt_base);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}

static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  uint64_t offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *pages,
				     uint64_t start,
				     enum i915_cache_level cache_level, u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length)
{
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}

static int ggtt_bind_vma(struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags = 0;
	int ret;

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	/* Currently applicable only to VLV */
	if (obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	intel_runtime_pm_get(i915);
	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
				cache_level, pte_flags);
	intel_runtime_pm_put(i915);

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	return 0;
}

static int aliasing_gtt_bind_vma(struct i915_vma *vma,
				 enum i915_cache_level cache_level,
				 u32 flags)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	u32 pte_flags;
	int ret;

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_GLOBAL_BIND) {
		intel_runtime_pm_get(i915);
		vma->vm->insert_entries(vma->vm,
					vma->pages, vma->node.start,
					cache_level, pte_flags);
		intel_runtime_pm_put(i915);
	}

	if (flags & I915_VMA_LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
		appgtt->base.insert_entries(&appgtt->base,
					    vma->pages, vma->node.start,
					    cache_level, pte_flags);
	}

	return 0;
}

static void ggtt_unbind_vma(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
	const u64 size = min(vma->size, vma->node.size);

	if (vma->flags & I915_VMA_GLOBAL_BIND) {
		intel_runtime_pm_get(i915);
		vma->vm->clear_range(vma->vm,
				     vma->node.start, size);
		intel_runtime_pm_put(i915);
	}

	if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
		appgtt->base.clear_range(&appgtt->base,
					 vma->node.start, size);
}

void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct device *kdev = &dev_priv->drm.pdev->dev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (unlikely(ggtt->do_idle_maps)) {
		if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
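
/* drm_mm colour-adjust callback (installed when !HAS_LLC, see
 * i915_ggtt_init_hw()): trims a page from each end of a hole whose
 * neighbouring node has a different colour, keeping a guard page between
 * objects with different cache attributes.
 */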
static void i915_gtt_color_adjust(const struct drm_mm_node *node,
				  unsigned long color,
				  u64 *start,
				  u64 *end)
{
	if (node->color != color)
		*start += I915_GTT_PAGE_SIZE;

	node = list_next_entry(node, node_list);
	if (node->allocated && node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}

int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
2737
{
2738 2739 2740 2741 2742 2743 2744 2745 2746
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
2747
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2748
	unsigned long hole_start, hole_end;
2749
	struct i915_hw_ppgtt *ppgtt;
2750
	struct drm_mm_node *entry;
2751
	int ret;
2752

2753 2754 2755
	ret = intel_vgt_balloon(dev_priv);
	if (ret)
		return ret;
2756

2757 2758 2759
	/* Reserve a mappable slot for our lockless error capture */
	ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
						  &ggtt->error_capture,
2760
						  PAGE_SIZE, 0,
2761
						  I915_COLOR_UNEVICTABLE,
2762 2763 2764 2765 2766
						  0, ggtt->mappable_end,
						  0, 0);
	if (ret)
		return ret;

2767
	/* Clear any non-preallocated blocks */
2768
	drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
2769 2770
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
2771
		ggtt->base.clear_range(&ggtt->base, hole_start,
2772
				       hole_end - hole_start);
2773 2774 2775
	}

	/* And finally clear the reserved guard page */
2776
	ggtt->base.clear_range(&ggtt->base,
2777
			       ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
2778

2779
	if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
2780
		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2781 2782 2783 2784
		if (!ppgtt) {
			ret = -ENOMEM;
			goto err;
		}
2785

2786
		ret = __hw_ppgtt_init(ppgtt, dev_priv);
2787 2788
		if (ret)
			goto err_ppgtt;
2789

2790
		if (ppgtt->base.allocate_va_range) {
2791 2792
			ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
							    ppgtt->base.total);
2793 2794
			if (ret)
				goto err_ppgtt_cleanup;
2795
		}
2796

2797 2798
		ppgtt->base.clear_range(&ppgtt->base,
					ppgtt->base.start,
2799
					ppgtt->base.total);
2800

2801
		dev_priv->mm.aliasing_ppgtt = ppgtt;
2802 2803
		WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
		ggtt->base.bind_vma = aliasing_gtt_bind_vma;
2804 2805
	}

2806
	return 0;
2807 2808 2809 2810 2811 2812 2813 2814

err_ppgtt_cleanup:
	ppgtt->base.cleanup(&ppgtt->base);
err_ppgtt:
	kfree(ppgtt);
err:
	drm_mm_remove_node(&ggtt->error_capture);
	return ret;
2815 2816
}

2817 2818
/**
 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
2819
 * @dev_priv: i915 device
2820
 */
2821
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
2822
{
2823
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
2824

2825 2826 2827
	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
		ppgtt->base.cleanup(&ppgtt->base);
		kfree(ppgtt);
2829 2830
	}

2831
	i915_gem_cleanup_stolen(&dev_priv->drm);
2832

2833 2834 2835
	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);

2836
	if (drm_mm_initialized(&ggtt->base.mm)) {
2837
		intel_vgt_deballoon(dev_priv);
2838

2839 2840 2841
		mutex_lock(&dev_priv->drm.struct_mutex);
		i915_address_space_fini(&ggtt->base);
		mutex_unlock(&dev_priv->drm.struct_mutex);
2842 2843
	}

2844
	ggtt->base.cleanup(&ggtt->base);
2845 2846

	arch_phys_wc_del(ggtt->mtrr);
2847
	io_mapping_fini(&ggtt->mappable);
2848
}
2849

2850
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
2851 2852 2853 2854 2855 2856
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

2857
static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
2858 2859 2860 2861 2862
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
2863 2864 2865 2866 2867 2868 2869

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

2870 2871 2872
	return bdw_gmch_ctl << 20;
}

2873
static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
2874 2875 2876 2877 2878 2879 2880 2881 2882 2883
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

2884
static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
2885 2886 2887 2888 2889 2890
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

2891
static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
2892 2893 2894 2895 2896 2897
{
	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
	return bdw_gmch_ctl << 25; /* 32 MB units */
}

2898 2899 2900 2901 2902 2903 2904 2905 2906 2907 2908 2909 2910 2911 2912 2913 2914 2915
static size_t chv_get_stolen_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;

	/*
	 * 0x0  to 0x10: 32MB increments starting at 0MB
	 * 0x11 to 0x16: 4MB increments starting at 8MB
	 * 0x17 to 0x1d: 4MB increments start at 36MB
	 */
	if (gmch_ctrl < 0x11)
		return gmch_ctrl << 25;
	else if (gmch_ctrl < 0x17)
		return (gmch_ctrl - 0x11 + 2) << 22;
	else
		return (gmch_ctrl - 0x17 + 9) << 22;
}

2916 2917 2918 2919 2920 2921 2922 2923 2924 2925 2926 2927
static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
{
	gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;

	if (gen9_gmch_ctl < 0xf0)
		return gen9_gmch_ctl << 25; /* 32 MB units */
	else
		/* 4MB increments starting at 0xf0 for 4MB */
		return (gen9_gmch_ctl - 0xf0 + 1) << 22;
}

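/* Map the GTT page-table half of BAR0 (the "GSM") and allocate the scratch
 * page backing unused GGTT entries. BXT gets an uncached mapping because WC
 * writes larger than 64 bits to this range may be dropped (see below).
 */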
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
2930 2931
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	struct pci_dev *pdev = dev_priv->drm.pdev;
2932
	phys_addr_t phys_addr;
2933
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
2936
	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;

	/*
	 * On BXT writes larger than 64 bit to the GTT pagetable range will be
	 * dropped. For WC mappings in general we have 64 byte burst writes
	 * when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
2945
	if (IS_GEN9_LP(dev_priv))
2946
		ggtt->gsm = ioremap_nocache(phys_addr, size);
	else
2948
		ggtt->gsm = ioremap_wc(phys_addr, size);
2949
	if (!ggtt->gsm) {
2950
		DRM_ERROR("Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

2954
	ret = setup_scratch_page(dev_priv, &ggtt->base.scratch_page, GFP_DMA32);
2955
	if (ret) {
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
2958
		iounmap(ggtt->gsm);
2959
		return ret;
	}

2962
	return 0;
}

/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases. */
2968
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
B
Ben Widawsky 已提交
2969 2970 2971 2972 2973 2974 2975 2976 2977 2978 2979 2980
{
	uint64_t pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

2981
	if (!USES_PPGTT(dev_priv))
2982 2983 2984 2985 2986 2987 2988 2989 2990 2991 2992 2993 2994 2995 2996
		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
		 * so RTL will always use the value corresponding to
		 * pat_sel = 000".
		 * So let's disable cache for GGTT to avoid screen corruptions.
		 * MOCS still can be used though.
		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
		 * before this patch, i.e. the same uncached + snooping access
		 * like on gen6/7 seems to be in effect.
		 * - So this just fixes blitter/render access. Again it looks
		 * like it's not just uncached access, but uncached + snooping.
		 * So we can still hold onto all our assumptions wrt cpu
		 * clflushing on LLC machines.
		 */
		pat = GEN8_PPAT(0, GEN8_PPAT_UC);

	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
	 * write would work. */
2999 3000
	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}

3003 3004 3005 3006 3007 3008 3009 3010 3011 3012
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
3013 3014 3015 3016 3017 3018 3019 3020 3021 3022 3023
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
3024 3025 3026 3027 3028 3029 3030 3031 3032 3033
	 */
	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

3034 3035
	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
3036 3037
}

3038 3039 3040 3041 3042
static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
3043
	cleanup_scratch_page(vm->i915, &vm->scratch_page);
3044 3045
}

3046
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
3048
	struct drm_i915_private *dev_priv = ggtt->base.i915;
3049
	struct pci_dev *pdev = dev_priv->drm.pdev;
3050
	unsigned int size;
	u16 snb_gmch_ctl;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
3054 3055
	ggtt->mappable_base = pci_resource_start(pdev, 2);
	ggtt->mappable_end = pci_resource_len(pdev, 2);

3057 3058
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39)))
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));

3060
	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

3062
	if (INTEL_GEN(dev_priv) >= 9) {
3063
		ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
3064
		size = gen8_get_total_gtt_size(snb_gmch_ctl);
3065
	} else if (IS_CHERRYVIEW(dev_priv)) {
3066
		ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
3067
		size = chv_get_total_gtt_size(snb_gmch_ctl);
3068
	} else {
3069
		ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
3070
		size = gen8_get_total_gtt_size(snb_gmch_ctl);
3071
	}

3073
	ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;

3075
	if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
3076 3077 3078
		chv_setup_private_ppat(dev_priv);
	else
		bdw_setup_private_ppat(dev_priv);

3080
	ggtt->base.cleanup = gen6_gmch_remove;
3081 3082
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
3083
	ggtt->base.insert_page = gen8_ggtt_insert_page;
3084
	ggtt->base.clear_range = nop_clear_range;
3085
	if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
3086 3087 3088 3089 3090 3091
		ggtt->base.clear_range = gen8_ggtt_clear_range;

	ggtt->base.insert_entries = gen8_ggtt_insert_entries;
	if (IS_CHERRYVIEW(dev_priv))
		ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;

3092 3093
	ggtt->invalidate = gen6_ggtt_invalidate;

3094
	return ggtt_probe_common(ggtt, size);
}

3097
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
3098
{
3099
	struct drm_i915_private *dev_priv = ggtt->base.i915;
3100
	struct pci_dev *pdev = dev_priv->drm.pdev;
3101
	unsigned int size;
3102 3103
	u16 snb_gmch_ctl;

3104 3105
	ggtt->mappable_base = pci_resource_start(pdev, 2);
	ggtt->mappable_end = pci_resource_len(pdev, 2);
3106

3107 3108
	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
3109
	 */
3110
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
3111
		DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
3112
		return -ENXIO;
3113 3114
	}

3115 3116 3117
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
3118

3119
	ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
3120

3121 3122
	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
3123

3124
	ggtt->base.clear_range = gen6_ggtt_clear_range;
3125
	ggtt->base.insert_page = gen6_ggtt_insert_page;
3126 3127 3128
	ggtt->base.insert_entries = gen6_ggtt_insert_entries;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
3129 3130
	ggtt->base.cleanup = gen6_gmch_remove;

3131 3132
	ggtt->invalidate = gen6_ggtt_invalidate;

3133 3134 3135 3136 3137 3138 3139 3140 3141 3142
	if (HAS_EDRAM(dev_priv))
		ggtt->base.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(dev_priv))
		ggtt->base.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(dev_priv))
		ggtt->base.pte_encode = byt_pte_encode;
	else if (INTEL_GEN(dev_priv) >= 7)
		ggtt->base.pte_encode = ivb_pte_encode;
	else
		ggtt->base.pte_encode = snb_pte_encode;
3143

3144
	return ggtt_probe_common(ggtt, size);
3145 3146
}

3147
static void i915_gmch_remove(struct i915_address_space *vm)
3148
{
3149
	intel_gmch_remove();
3150
}
3151

3152
static int i915_gmch_probe(struct i915_ggtt *ggtt)
3153
{
3154
	struct drm_i915_private *dev_priv = ggtt->base.i915;
3155 3156
	int ret;

3157
	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
3158 3159 3160 3161 3162
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

3163 3164 3165 3166
	intel_gtt_get(&ggtt->base.total,
		      &ggtt->stolen_size,
		      &ggtt->mappable_base,
		      &ggtt->mappable_end);
3167

3168
	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
3169
	ggtt->base.insert_page = i915_ggtt_insert_page;
3170 3171 3172 3173
	ggtt->base.insert_entries = i915_ggtt_insert_entries;
	ggtt->base.clear_range = i915_ggtt_clear_range;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
3174
	ggtt->base.cleanup = i915_gmch_remove;
3175

3176 3177
	ggtt->invalidate = gmch_ggtt_invalidate;

3178
	if (unlikely(ggtt->do_idle_maps))
3179 3180
		DRM_INFO("applying Ironlake quirks for intel_iommu\n");

3181 3182 3183
	return 0;
}

3184
/**
3185
 * i915_ggtt_probe_hw - Probe GGTT hardware location
3186
 * @dev_priv: i915 device
3187
 */
3188
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
3189
{
3190
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
3191 3192
	int ret;

3193
	ggtt->base.i915 = dev_priv;
3194

3195 3196 3197 3198 3199 3200
	if (INTEL_GEN(dev_priv) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (INTEL_GEN(dev_priv) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
3201
	if (ret)
3202 3203
		return ret;

3204 3205 3206 3207 3208 3209 3210 3211 3212 3213
	/* Trim the GGTT to fit the GuC mappable upper range (when enabled).
	 * This is easier than doing range restriction on the fly, as we
	 * currently don't have any bits spare to pass in this upper
	 * restriction!
	 */
	if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
		ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
	}

3214 3215
	if ((ggtt->base.total - 1) >> 32) {
		DRM_ERROR("We never expected a Global GTT with more than 32bits"
3216
			  " of address space! Found %lldM!\n",
3217 3218 3219 3220 3221
			  ggtt->base.total >> 20);
		ggtt->base.total = 1ULL << 32;
		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
	}

3222 3223 3224 3225 3226 3227 3228
	if (ggtt->mappable_end > ggtt->base.total) {
		DRM_ERROR("mappable aperture extends past end of GGTT,"
			  " aperture=%llx, total=%llx\n",
			  ggtt->mappable_end, ggtt->base.total);
		ggtt->mappable_end = ggtt->base.total;
	}

3229
	/* GMADR is the PCI mmio aperture into the global GTT. */
3230
	DRM_INFO("Memory usable by graphics device = %lluM\n",
3231 3232
		 ggtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
3233
	DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
3234 3235 3236 3237
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		DRM_INFO("VT-d active for gfx access\n");
#endif
3238 3239

	return 0;
3240 3241 3242 3243
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
3244
 * @dev_priv: i915 device
3245
 */
3246
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
3247 3248 3249 3250
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

3251 3252 3253 3254 3255
	INIT_LIST_HEAD(&dev_priv->vm_list);

	/* Subtract the guard page before address space initialization to
	 * shrink the range used by drm_mm.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
3257
	ggtt->base.total -= PAGE_SIZE;
	i915_address_space_init(&ggtt->base, dev_priv, "[global]");
3259 3260 3261
	ggtt->base.total += PAGE_SIZE;
	if (!HAS_LLC(dev_priv))
		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
	mutex_unlock(&dev_priv->drm.struct_mutex);
3263

3264 3265 3266
	if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
				dev_priv->ggtt.mappable_base,
				dev_priv->ggtt.mappable_end)) {
3267 3268 3269 3270 3271 3272
		ret = -EIO;
		goto out_gtt_cleanup;
	}

	ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);

3273 3274 3275 3276
	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
3277
	ret = i915_gem_init_stolen(dev_priv);
3278 3279 3280 3281
	if (ret)
		goto out_gtt_cleanup;

	return 0;
3282 3283

out_gtt_cleanup:
3284
	ggtt->base.cleanup(&ggtt->base);
3285
	return ret;
3286
}
3287

3288
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
3289
{
3290
	if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
3291 3292 3293 3294 3295
		return -EIO;

	return 0;
}

3296 3297 3298 3299 3300 3301 3302 3303 3304 3305
void i915_ggtt_enable_guc(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate = guc_ggtt_invalidate;
}

void i915_ggtt_disable_guc(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate = gen6_ggtt_invalidate;
}

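/*
 * Rewrite the GGTT PTEs (typically after resume or a GPU reset): scratch
 * out the whole range, clflush and rebind every object still on the bound
 * list, and restore the PPAT/PDE state programmed at init.
 */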
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
3307
{
3308
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
3309
	struct drm_i915_gem_object *obj, *on;
3310

3311
	i915_check_and_clear_faults(dev_priv);
3312 3313

	/* First fill our portion of the GTT with scratch pages */
3314
	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
3315

3316 3317 3318 3319
	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry_safe(obj, on,
3320
				 &dev_priv->mm.bound_list, global_link) {
3321 3322 3323
		bool ggtt_bound = false;
		struct i915_vma *vma;

3324
		list_for_each_entry(vma, &obj->vma_list, obj_link) {
3325
			if (vma->vm != &ggtt->base)
3326
				continue;
3327

3328 3329 3330
			if (!i915_vma_unbind(vma))
				continue;

3331 3332
			WARN_ON(i915_vma_bind(vma, obj->cache_level,
					      PIN_UPDATE));
3333
			ggtt_bound = true;
3334 3335
		}

3336
		if (ggtt_bound)
3337
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
3338
	}
3339

3340 3341
	ggtt->base.closed = false;

3342
	if (INTEL_GEN(dev_priv) >= 8) {
3343
		if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
3344 3345 3346 3347 3348 3349 3350
			chv_setup_private_ppat(dev_priv);
		else
			bdw_setup_private_ppat(dev_priv);

		return;
	}

3351
	if (USES_PPGTT(dev_priv)) {
3352 3353
		struct i915_address_space *vm;

3354 3355 3356
		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			/* TODO: Perhaps it shouldn't be gen6 specific */

3357
			struct i915_hw_ppgtt *ppgtt;
3358

3359
			if (i915_is_ggtt(vm))
3360
				ppgtt = dev_priv->mm.aliasing_ppgtt;
3361 3362
			else
				ppgtt = i915_vm_to_ppgtt(vm);
3363 3364 3365 3366 3367 3368

			gen6_write_page_range(dev_priv, &ppgtt->pd,
					      0, ppgtt->base.total);
		}
	}

3369
	i915_ggtt_invalidate(dev_priv);
3370 3371
}

3372
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    const struct i915_ggtt_view *view)
3376
{
3377
	struct rb_node *rb;
3378

3379 3380 3381 3382 3383
	rb = obj->vma_tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
3385
		if (cmp == 0)
			return vma;
3387

3388 3389 3390 3391 3392 3393
		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
3395 3396 3397
}

struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm,
				  const struct i915_ggtt_view *view)
3401
{
	struct i915_vma *vma;
3403

3404
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));
3406

	vma = i915_gem_obj_to_vma(obj, vm, view);
3408
	if (!vma) {
		vma = i915_vma_create(obj, vm, view);
3410 3411
		GEM_BUG_ON(vma != i915_gem_obj_to_vma(obj, vm, view));
	}
3412

3413
	GEM_BUG_ON(i915_vma_is_closed(vma));
3414 3415
	return vma;
}

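/* Walk the source page addresses column by column, starting from the bottom
 * row, and emit them into the destination sg_table; this produces the page
 * order needed for a rotated GGTT view of the framebuffer.
 */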
static struct scatterlist *
3418
rotate_pages(const dma_addr_t *in, unsigned int offset,
3419
	     unsigned int width, unsigned int height,
3420
	     unsigned int stride,
3421
	     struct sg_table *st, struct scatterlist *sg)
3422 3423 3424 3425 3426
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
3427
		src_idx = stride * (height - 1) + column;
3428 3429 3430 3431 3432 3433 3434
		for (row = 0; row < height; row++) {
			st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, PAGE_SIZE, 0);
3435
			sg_dma_address(sg) = in[offset + src_idx];
3436 3437
			sg_dma_len(sg) = PAGE_SIZE;
			sg = sg_next(sg);
3438
			src_idx -= stride;
3439 3440
		}
	}
3441 3442

	return sg;
3443 3444 3445
}

static struct sg_table *
3446
intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
3447 3448
			  struct drm_i915_gem_object *obj)
{
3449
	const size_t n_pages = obj->base.size / PAGE_SIZE;
3450
	unsigned int size = intel_rotation_info_size(rot_info);
3451 3452
	struct sgt_iter sgt_iter;
	dma_addr_t dma_addr;
3453 3454 3455
	unsigned long i;
	dma_addr_t *page_addr_list;
	struct sg_table *st;
3456
	struct scatterlist *sg;
3457
	int ret = -ENOMEM;
3458 3459

	/* Allocate a temporary list of source pages for random access. */
3460
	page_addr_list = drm_malloc_gfp(n_pages,
3461 3462
					sizeof(dma_addr_t),
					GFP_TEMPORARY);
3463 3464 3465 3466 3467 3468 3469 3470
	if (!page_addr_list)
		return ERR_PTR(ret);

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

3471
	ret = sg_alloc_table(st, size, GFP_KERNEL);
3472 3473 3474 3475 3476
	if (ret)
		goto err_sg_alloc;

	/* Populate source page list from the object. */
	i = 0;
	for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
3478
		page_addr_list[i++] = dma_addr;
3479

3480
	GEM_BUG_ON(i != n_pages);
3481 3482 3483
	st->nents = 0;
	sg = st->sgl;

3484 3485 3486 3487
	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
		sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].stride, st, sg);
3488 3489
	}

3490 3491
	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3492 3493 3494 3495 3496 3497 3498 3499 3500 3501

	drm_free_large(page_addr_list);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	drm_free_large(page_addr_list);

3502 3503 3504
	DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);

3505 3506
	return ERR_PTR(ret);
}

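/* Build an sg_table covering only the run of pages described by the partial
 * view (params.partial.offset/size), reusing the DMA addresses of the
 * object's own backing pages.
 */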
static struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
3513 3514 3515
	struct scatterlist *sg, *iter;
	unsigned int count = view->params.partial.size;
	unsigned int offset;
3516 3517 3518 3519 3520 3521
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

3522
	ret = sg_alloc_table(st, count, GFP_KERNEL);
3523 3524 3525
	if (ret)
		goto err_sg_alloc;

3526 3527 3528 3529 3530
	iter = i915_gem_object_get_sg(obj,
				      view->params.partial.offset,
				      &offset);
	GEM_BUG_ON(!iter);

3531 3532
	sg = st->sgl;
	st->nents = 0;
3533 3534
	do {
		unsigned int len;
3535

3536 3537 3538 3539 3540 3541
		len = min(iter->length - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;
3542 3543

		st->nents++;
3544 3545 3546 3547 3548
		count -= len >> PAGE_SHIFT;
		if (count == 0) {
			sg_mark_end(sg);
			return st;
		}
3549

3550 3551 3552 3553
		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);
3554 3555 3556 3557 3558 3559 3560

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}

3561
static int
3562
i915_get_ggtt_vma_pages(struct i915_vma *vma)
3563
{
3564 3565
	int ret = 0;

3566 3567 3568 3569 3570 3571 3572
	/* The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	if (vma->pages)
		return 0;

	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
		vma->pages = vma->obj->mm.pages;
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		vma->pages =
			intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
	else
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);

	if (!vma->pages) {
		DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
			  vma->ggtt_view.type);
		ret = -EINVAL;
	} else if (IS_ERR(vma->pages)) {
		ret = PTR_ERR(vma->pages);
		vma->pages = NULL;
		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
			  vma->ggtt_view.type, ret);
	}

	return ret;
}
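/*
 * Hedged sketch (not part of the original file): the vma->pages lifetime
 * rule above, seen from a caller. The wrapper name and use_sg_table() are
 * hypothetical placeholders; the point is only the pin/unpin bracketing
 * around any dereference of vma->pages.
 *
 *	static int example_access_vma_pages(struct i915_vma *vma)
 *	{
 *		int ret;
 *
 *		ret = i915_gem_object_pin_pages(vma->obj);
 *		if (ret)
 *			return ret;
 *
 *		ret = i915_get_ggtt_vma_pages(vma);
 *		if (ret == 0)
 *			use_sg_table(vma->pages);
 *
 *		i915_gem_object_unpin_pages(vma->obj);
 *		return ret;
 *	}
 */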

/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}
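
/*
 * Hedged usage sketch (not part of the original file): roughly how a caller
 * in the style of i915_vma_insert() pins a node at a fixed GGTT offset. The
 * wrapper name is hypothetical; the argument pattern follows the kernel-doc
 * above.
 *
 *	static int example_reserve_fixed(struct i915_vma *vma,
 *					 u64 size, u64 offset,
 *					 unsigned int flags)
 *	{
 *		return i915_gem_gtt_reserve(vma->vm, &vma->node,
 *					    size, offset,
 *					    vma->obj->cache_level, flags);
 *	}
 */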

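/*
 * Pick a pseudo-random, @align-aligned address for a block of @len bytes
 * somewhere inside [@start, @end). Used by i915_gem_gtt_insert() below as
 * a cheap first guess before falling back to a full eviction scan.
 */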
static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_long();
		} else {
			addr = get_random_int();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_int();
			}
		}
		div64_u64_rem(addr, range, &addr);
		start += addr;
	}

	return round_up(start, align);
}

/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, a victim is first randomly selected and tested
 * for eviction; failing that, the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	u32 search_flag, alloc_flag;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0  && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	if (flags & PIN_HIGH) {
		search_flag = DRM_MM_SEARCH_BELOW;
		alloc_flag = DRM_MM_CREATE_TOP;
	} else {
		search_flag = DRM_MM_SEARCH_DEFAULT;
		alloc_flag = DRM_MM_CREATE_DEFAULT;
	}

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range_generic(&vm->mm, node,
						  size, alignment, color,
						  start, end,
						  search_flag, alloc_flag);
	if (err != -ENOSPC)
		return err;

	/* No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)             (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
 * simplicity and similarity between different GTTs, try the single
	 * random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	search_flag = DRM_MM_SEARCH_DEFAULT;
	return drm_mm_insert_node_in_range_generic(&vm->mm, node,
						   size, alignment, color,
						   start, end,
						   search_flag, alloc_flag);
}
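
/*
 * Hedged usage sketch (not part of the original file): how a caller in the
 * style of i915_vma_insert() might ask for any suitable hole in the low
 * 4GiB of the address space, relying on the eviction fallbacks above. The
 * wrapper name and the 4GiB cap are illustrative assumptions.
 *
 *	static int example_insert_low(struct i915_vma *vma,
 *				      u64 size, u64 alignment,
 *				      unsigned int flags)
 *	{
 *		return i915_gem_gtt_insert(vma->vm, &vma->node,
 *					   size, alignment,
 *					   vma->obj->cache_level,
 *					   0, min_t(u64, vma->vm->total, 1ULL << 32),
 *					   flags);
 *	}
 */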