/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view is called the normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view are
 * different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old core
 * GEM API functions, the ones not taking the view parameter, operate on the
 * normal GGTT view (see the illustrative sketch after the view declarations below).
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * Core API is designed to have copy semantics which means that passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

const struct i915_ggtt_view i915_ggtt_view_normal;
const struct i915_ggtt_view i915_ggtt_view_rotated = {
        .type = I915_GGTT_VIEW_ROTATED
};
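
/*
 * Illustrative sketch only (hypothetical helper, not used by the driver):
 * callers pick one of the singleton views above, or fill in their own
 * struct i915_ggtt_view; the core API has copy semantics, so the structure
 * does not need to outlive the call.
 */
static inline const struct i915_ggtt_view *
example_choose_ggtt_view(bool rotated)
{
	return rotated ? &i915_ggtt_view_rotated : &i915_ggtt_view_normal;
}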

static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);

static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;

	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;

	if (intel_vgpu_active(dev))
		has_full_ppgtt = false; /* emulation is too hard */

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (enable_ppgtt == 0 || !has_aliasing_ppgtt))
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}
#endif

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
	    dev->pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
		return 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

static void ppgtt_bind_vma(struct i915_vma *vma,
			   enum i915_cache_level cache_level,
			   u32 flags);
static void ppgtt_unbind_vma(struct i915_vma *vma);

static inline gen8_pte_t gen8_pte_encode(dma_addr_t addr,
					 enum i915_cache_level level,
					 bool valid)
{
	gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

static inline gen8_pde_t gen8_pde_encode(struct drm_device *dev,
					  dma_addr_t addr,
					  enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 flags)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  bool valid, u32 unused)
{
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

#define i915_dma_unmap_single(px, dev) \
	__i915_dma_unmap_single((px)->daddr, dev)

static inline void __i915_dma_unmap_single(dma_addr_t daddr,
					struct drm_device *dev)
{
	struct device *device = &dev->pdev->dev;

	dma_unmap_page(device, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
}

/**
 * i915_dma_map_single() - Create a dma mapping for a page table/dir/etc.
 * @px:	Page table/dir/etc to get a DMA map for
 * @dev:	drm device
 *
 * Page table allocations are unified across all gens. They always require a
 * single 4k allocation, as well as a DMA mapping. If we keep the structs
 * symmetric here, the simple macro covers us for every page table type.
 *
 * Return: 0 if success.
 */
#define i915_dma_map_single(px, dev) \
	i915_dma_map_page_single((px)->page, (dev), &(px)->daddr)

static inline int i915_dma_map_page_single(struct page *page,
					   struct drm_device *dev,
					   dma_addr_t *daddr)
{
	struct device *device = &dev->pdev->dev;

	*daddr = dma_map_page(device, page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(device, *daddr))
		return -ENOMEM;

	return 0;
}
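
/*
 * Minimal usage sketch (hypothetical, not part of the driver): any of the
 * page table structures in this file that carry ->page and ->daddr can be
 * mapped and unmapped with the helper pair above.
 */
static inline int example_map_then_unmap(struct i915_page_table_entry *pt,
					 struct drm_device *dev)
{
	int ret;

	ret = i915_dma_map_single(pt, dev);	/* fills pt->daddr */
	if (ret)
		return ret;

	/* ... point PDEs/PTEs at pt->daddr while the table is in use ... */

	i915_dma_unmap_single(pt, dev);		/* balance on teardown */
	return 0;
}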

static void unmap_and_free_pt(struct i915_page_table_entry *pt,
			       struct drm_device *dev)
{
	if (WARN_ON(!pt->page))
		return;

	i915_dma_unmap_single(pt, dev);
	__free_page(pt->page);
	kfree(pt->used_ptes);
	kfree(pt);
}

static struct i915_page_table_entry *alloc_pt_single(struct drm_device *dev)
{
	struct i915_page_table_entry *pt;
	const size_t count = INTEL_INFO(dev)->gen >= 8 ?
		GEN8_PTES : GEN6_PTES;
	int ret = -ENOMEM;

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return ERR_PTR(-ENOMEM);

	pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
				GFP_KERNEL);

	if (!pt->used_ptes)
		goto fail_bitmap;

	pt->page = alloc_page(GFP_KERNEL);
	if (!pt->page)
		goto fail_page;

	ret = i915_dma_map_single(pt, dev);
	if (ret)
		goto fail_dma;

	return pt;

fail_dma:
	__free_page(pt->page);
fail_page:
	kfree(pt->used_ptes);
fail_bitmap:
	kfree(pt);

	return ERR_PTR(ret);
}

/**
 * alloc_pt_range() - Allocate multiple page tables
 * @pd:		The page directory which will have at least @count entries
 *		available to point to the allocated page tables.
 * @pde:	First page directory entry for which we are allocating.
 * @count:	Number of pages to allocate.
 * @dev:	DRM device.
 *
 * Allocates multiple page table pages and sets the appropriate entries in the
 * page table structure within the page directory. Function cleans up after
 * itself on any failures.
 *
 * Return: 0 if allocation succeeded.
 */
static int alloc_pt_range(struct i915_page_directory_entry *pd, uint16_t pde, size_t count,
			  struct drm_device *dev)
{
	int i, ret;

	/* 512 is the max page tables per page_directory on any platform. */
	if (WARN_ON(pde + count > I915_PDES))
		return -EINVAL;

	for (i = pde; i < pde + count; i++) {
		struct i915_page_table_entry *pt = alloc_pt_single(dev);

		if (IS_ERR(pt)) {
			ret = PTR_ERR(pt);
			goto err_out;
		}
		WARN(pd->page_table[i],
		     "Leaking page directory entry %d (%p)\n",
		     i, pd->page_table[i]);
		pd->page_table[i] = pt;
	}

	return 0;

err_out:
	while (i-- > pde)
		unmap_and_free_pt(pd->page_table[i], dev);
	return ret;
}

static void unmap_and_free_pd(struct i915_page_directory_entry *pd)
{
	if (pd->page) {
		__free_page(pd->page);
		kfree(pd);
	}
}

static struct i915_page_directory_entry *alloc_pd_single(void)
{
	struct i915_page_directory_entry *pd;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!pd->page) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	return pd;
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
			   uint64_t val)
{
	int ret;

	BUG_ON(entry >= 4);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
	intel_ring_emit(ring, (u32)(val >> 32));
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
	intel_ring_emit(ring, (u32)(val));
	intel_ring_advance(ring);

	return 0;
}

static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_engine_cs *ring)
{
	int i, ret;

	/* bit of a hack to find the actual last used pd */
	int used_pd = ppgtt->num_pd_entries / I915_PDES;

	for (i = used_pd - 1; i >= 0; i--) {
		dma_addr_t addr = ppgtt->pdp.page_directory[i]->daddr;
		ret = gen8_write_pdp(ring, i, addr);
		if (ret)
			return ret;
	}

	return 0;
}

static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_pte_t *pt_vaddr, scratch_pte;
	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned last_pte, i;

	scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
				      I915_CACHE_LLC, use_scratch);

	while (num_entries) {
		struct i915_page_directory_entry *pd;
		struct i915_page_table_entry *pt;
		struct page *page_table;

		if (WARN_ON(!ppgtt->pdp.page_directory[pdpe]))
			continue;

		pd = ppgtt->pdp.page_directory[pdpe];

		if (WARN_ON(!pd->page_table[pde]))
			continue;

		pt = pd->page_table[pde];

		if (WARN_ON(!pt->page))
			continue;

		page_table = pt->page;

		last_pte = pte + num_entries;
		if (last_pte > GEN8_PTES)
			last_pte = GEN8_PTES;

		pt_vaddr = kmap_atomic(page_table);

		for (i = pte; i < last_pte; i++) {
			pt_vaddr[i] = scratch_pte;
			num_entries--;
		}

		if (!HAS_LLC(ppgtt->base.dev))
			drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
		kunmap_atomic(pt_vaddr);

		pte = 0;
		if (++pde == I915_PDES) {
			pdpe++;
			pde = 0;
		}
	}
}

static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level, u32 unused)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_pte_t *pt_vaddr;
	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
	struct sg_page_iter sg_iter;

	pt_vaddr = NULL;

	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
			break;

		if (pt_vaddr == NULL) {
			struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[pdpe];
			struct i915_page_table_entry *pt = pd->page_table[pde];
			struct page *page_table = pt->page;

			pt_vaddr = kmap_atomic(page_table);
		}

		pt_vaddr[pte] =
			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
					cache_level, true);
		if (++pte == GEN8_PTES) {
			if (!HAS_LLC(ppgtt->base.dev))
				drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
			kunmap_atomic(pt_vaddr);
			pt_vaddr = NULL;
			if (++pde == I915_PDES) {
				pdpe++;
				pde = 0;
			}
			pte = 0;
		}
	}
	if (pt_vaddr) {
		if (!HAS_LLC(ppgtt->base.dev))
			drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
		kunmap_atomic(pt_vaddr);
	}
}

static void gen8_free_page_tables(struct i915_page_directory_entry *pd, struct drm_device *dev)
{
	int i;

	if (!pd->page)
		return;

	for (i = 0; i < I915_PDES; i++) {
		if (WARN_ON(!pd->page_table[i]))
			continue;

		unmap_and_free_pt(pd->page_table[i], dev);
		pd->page_table[i] = NULL;
	}
}

static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		if (WARN_ON(!ppgtt->pdp.page_directory[i]))
			continue;

		gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);
		unmap_and_free_pd(ppgtt->pdp.page_directory[i]);
	}
}

static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
	struct pci_dev *hwdev = ppgtt->base.dev->pdev;
	int i, j;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		/* TODO: In the future we'll support sparse mappings, so this
		 * will have to change. */
		if (!ppgtt->pdp.page_directory[i]->daddr)
			continue;

		pci_unmap_page(hwdev, ppgtt->pdp.page_directory[i]->daddr, PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);

		for (j = 0; j < I915_PDES; j++) {
			struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
			struct i915_page_table_entry *pt;
			dma_addr_t addr;

			if (WARN_ON(!pd->page_table[j]))
				continue;

			pt = pd->page_table[j];
			addr = pt->daddr;

			if (addr)
				pci_unmap_page(hwdev, addr, PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
		}
	}
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);

	gen8_ppgtt_unmap_pages(ppgtt);
	gen8_ppgtt_free(ppgtt);
}

static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
	int i, ret;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		ret = alloc_pt_range(ppgtt->pdp.page_directory[i],
				     0, I915_PDES, ppgtt->base.dev);
		if (ret)
			goto unwind_out;
	}

	return 0;

unwind_out:
	while (i--)
		gen8_free_page_tables(ppgtt->pdp.page_directory[i], ppgtt->base.dev);

	return -ENOMEM;
}

static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
						const int max_pdp)
{
	int i;

	for (i = 0; i < max_pdp; i++) {
		ppgtt->pdp.page_directory[i] = alloc_pd_single();
		if (IS_ERR(ppgtt->pdp.page_directory[i]))
			goto unwind_out;
	}

	ppgtt->num_pd_pages = max_pdp;
	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);

	return 0;

unwind_out:
	while (i--)
		unmap_and_free_pd(ppgtt->pdp.page_directory[i]);

	return -ENOMEM;
}

static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
			    const int max_pdp)
{
	int ret;

	ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
	if (ret)
		return ret;

	ret = gen8_ppgtt_allocate_page_tables(ppgtt);
	if (ret)
		goto err_out;

	ppgtt->num_pd_entries = max_pdp * I915_PDES;

	return 0;

err_out:
	gen8_ppgtt_free(ppgtt);
	return ret;
}

static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
					     const int pd)
{
	dma_addr_t pd_addr;
	int ret;

	pd_addr = pci_map_page(ppgtt->base.dev->pdev,
			       ppgtt->pdp.page_directory[pd]->page, 0,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
	if (ret)
		return ret;

	ppgtt->pdp.page_directory[pd]->daddr = pd_addr;

	return 0;
}

static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
					const int pd,
					const int pt)
{
	dma_addr_t pt_addr;
	struct i915_page_directory_entry *pdir = ppgtt->pdp.page_directory[pd];
	struct i915_page_table_entry *ptab = pdir->page_table[pt];
	struct page *p = ptab->page;
	int ret;

	pt_addr = pci_map_page(ppgtt->base.dev->pdev,
			       p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
	if (ret)
		return ret;

	ptab->daddr = pt_addr;

	return 0;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of legacy 32b address
 * space.
 *
 * FIXME: split allocation into smaller pieces. For now we only ever do this
 * once, but with full PPGTT, the multiple contiguous allocations will be bad.
 * TODO: Do something with the size parameter
 */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
{
	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
	const int min_pt_pages = I915_PDES * max_pdp;
	int i, j, ret;

	if (size % (1<<30))
		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);

	/* 1. Do all our allocations for page directories and page tables.
	 * We allocate more than was asked so that we can point the unused parts
	 * to valid entries that point to the scratch page. Dynamic page tables
	 * will fix this eventually.
	 */
	ret = gen8_ppgtt_alloc(ppgtt, GEN8_LEGACY_PDPES);
	if (ret)
		return ret;

	/*
	 * 2. Create DMA mappings for the page directories and page tables.
	 */
	for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
		if (ret)
			goto bail;

		for (j = 0; j < I915_PDES; j++) {
			ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
			if (ret)
				goto bail;
		}
	}

	/*
	 * 3. Map all the page directory entries to point to the page tables
	 * we've allocated.
	 *
	 * For now, the PPGTT helper functions all require that the PDEs are
	 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
	 * will never need to touch the PDEs again.
	 */
	for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
		struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i];
		gen8_pde_t *pd_vaddr;
		pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page);
		for (j = 0; j < I915_PDES; j++) {
			struct i915_page_table_entry *pt = pd->page_table[j];
			dma_addr_t addr = pt->daddr;
			pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
						      I915_CACHE_LLC);
		}
		if (!HAS_LLC(ppgtt->base.dev))
			drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
		kunmap_atomic(pd_vaddr);
	}

	ppgtt->switch_mm = gen8_mm_switch;
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.start = 0;

	/* This is the area that we advertise as usable for the caller */
	ppgtt->base.total = max_pdp * I915_PDES * GEN8_PTES * PAGE_SIZE;

	/* Set all ptes to a valid scratch page. Also above requested space */
	ppgtt->base.clear_range(&ppgtt->base, 0,
				ppgtt->num_pd_pages * GEN8_PTES * PAGE_SIZE,
				true);

	DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
			 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
	DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
			 ppgtt->num_pd_entries,
			 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
	return 0;

bail:
	gen8_ppgtt_unmap_pages(ppgtt);
	gen8_ppgtt_free(ppgtt);
	return ret;
}

static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
	struct i915_address_space *vm = &ppgtt->base;
	gen6_pte_t __iomem *pd_addr;
	gen6_pte_t scratch_pte;
	uint32_t pd_entry;
	int pte, pde;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);

	pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
		ppgtt->pd.pd_offset / sizeof(gen6_pte_t);

	seq_printf(m, "  VM %p (pd_offset %x-%x):\n", vm,
		   ppgtt->pd.pd_offset,
		   ppgtt->pd.pd_offset + ppgtt->num_pd_entries);
	for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
		u32 expected;
		gen6_pte_t *pt_vaddr;
		dma_addr_t pt_addr = ppgtt->pd.page_table[pde]->daddr;
		pd_entry = readl(pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[pde]->page);
		for (pte = 0; pte < GEN6_PTES; pte+=4) {
			unsigned long va =
				(pde * PAGE_SIZE * GEN6_PTES) +
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, "  SCRATCH ");
			}
			seq_puts(m, "\n");
		}
		kunmap_atomic(pt_vaddr);
	}
}

/* Write pde (index) from the page directory @pd to the page table @pt */
static void gen6_write_pde(struct i915_page_directory_entry *pd,
			    const int pde, struct i915_page_table_entry *pt)
{
	/* Caller needs to make sure the write completes if necessary */
	struct i915_hw_ppgtt *ppgtt =
		container_of(pd, struct i915_hw_ppgtt, pd);
	u32 pd_entry;

	pd_entry = GEN6_PDE_ADDR_ENCODE(pt->daddr);
	pd_entry |= GEN6_PDE_VALID;

	writel(pd_entry, ppgtt->pd_addr + pde);
}

/* Write all the page tables found in the ppgtt structure to incrementing page
 * directories. */
static void gen6_write_page_range(struct drm_i915_private *dev_priv,
				  struct i915_page_directory_entry *pd,
				  uint32_t start, uint32_t length)
{
	struct i915_page_table_entry *pt;
	uint32_t pde, temp;

	gen6_for_each_pde(pt, pd, start, length, temp, pde)
		gen6_write_pde(pd, pde, pt);

	/* Make sure write is complete before other code can use this page
	 * table. Also required for WC mapped PTEs */
	readl(dev_priv->gtt.gsm);
}

static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
	BUG_ON(ppgtt->pd.pd_offset & 0x3f);

	return (ppgtt->pd.pd_offset / 64) << 16;
}

static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct intel_engine_cs *ring)
{
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);

	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
	return 0;
}

static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_engine_cs *ring)
{
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	/* XXX: RCS is the only one to auto invalidate the TLBs? */
	if (ring->id != RCS) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_engine_cs *ring)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;


	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));

	POSTING_READ(RING_PP_DIR_DCLV(ring));

	return 0;
}

static void gen8_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int j;

	for_each_ring(ring, dev_priv, j) {
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static void gen7_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	uint32_t ecochk, ecobits;
	int i;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);

	for_each_ring(ring, dev_priv, i) {
		/* GFX_MODE is per-ring on gen7+ */
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static void gen6_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ecochk, gab_ctl, ecobits;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_pte_t *pt_vaddr, scratch_pte;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned first_pte = first_entry % GEN6_PTES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > GEN6_PTES)
			last_pte = GEN6_PTES;

		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level, u32 flags)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_pte_t *pt_vaddr;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned act_pte = first_entry % GEN6_PTES;
	struct sg_page_iter sg_iter;

	pt_vaddr = NULL;
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		if (pt_vaddr == NULL)
			pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt]->page);

		pt_vaddr[act_pte] =
			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
				       cache_level, true, flags);

		if (++act_pte == GEN6_PTES) {
			kunmap_atomic(pt_vaddr);
			pt_vaddr = NULL;
			act_pt++;
			act_pte = 0;
		}
	}
	if (pt_vaddr)
		kunmap_atomic(pt_vaddr);
}

/* PDE TLBs are a pain to invalidate pre GEN8. It requires a context reload. If we
 * are switching between contexts with the same LRCA, we also must do a force
 * restore.
 */
static inline void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	/* If current vm != vm, */
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
}

static void gen6_initialize_pt(struct i915_address_space *vm,
		struct i915_page_table_entry *pt)
{
	gen6_pte_t *pt_vaddr, scratch_pte;
	int i;

	WARN_ON(vm->scratch.addr == 0);

	scratch_pte = vm->pte_encode(vm->scratch.addr,
			I915_CACHE_LLC, true, 0);

	pt_vaddr = kmap_atomic(pt->page);

	for (i = 0; i < GEN6_PTES; i++)
		pt_vaddr[i] = scratch_pte;

	kunmap_atomic(pt_vaddr);
}

static int gen6_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start, uint64_t length)
{
	DECLARE_BITMAP(new_page_tables, I915_PDES);
	struct drm_device *dev = vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt =
				container_of(vm, struct i915_hw_ppgtt, base);
	struct i915_page_table_entry *pt;
	const uint32_t start_save = start, length_save = length;
	uint32_t pde, temp;
	int ret;

	WARN_ON(upper_32_bits(start));

	bitmap_zero(new_page_tables, I915_PDES);

	/* The allocation is done in two stages so that we can bail out with
	 * minimal amount of pain. The first stage finds new page tables that
	 * need allocation. The second stage marks the used ptes within the page
	 * tables.
	 */
	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
		if (pt != ppgtt->scratch_pt) {
			WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
			continue;
		}

		/* We've already allocated a page table */
		WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));

		pt = alloc_pt_single(dev);
		if (IS_ERR(pt)) {
			ret = PTR_ERR(pt);
			goto unwind_out;
		}

		gen6_initialize_pt(vm, pt);

		ppgtt->pd.page_table[pde] = pt;
		set_bit(pde, new_page_tables);
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
	}

	start = start_save;
	length = length_save;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, temp, pde) {
		DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);

		bitmap_zero(tmp_bitmap, GEN6_PTES);
		bitmap_set(tmp_bitmap, gen6_pte_index(start),
			   gen6_pte_count(start, length));

		if (test_and_clear_bit(pde, new_page_tables))
			gen6_write_pde(&ppgtt->pd, pde, pt);

		trace_i915_page_table_entry_map(vm, pde, pt,
					 gen6_pte_index(start),
					 gen6_pte_count(start, length),
					 GEN6_PTES);
		bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
				GEN6_PTES);
	}

	WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));

	/* Make sure write is complete before other code can use this page
	 * table. Also required for WC mapped PTEs */
	readl(dev_priv->gtt.gsm);

	mark_tlbs_dirty(ppgtt);
	return 0;

unwind_out:
	for_each_set_bit(pde, new_page_tables, I915_PDES) {
		struct i915_page_table_entry *pt = ppgtt->pd.page_table[pde];

		ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
		unmap_and_free_pt(pt, vm->dev);
	}

	mark_tlbs_dirty(ppgtt);
	return ret;
}

static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		struct i915_page_table_entry *pt = ppgtt->pd.page_table[i];

		if (pt != ppgtt->scratch_pt)
			unmap_and_free_pt(ppgtt->pd.page_table[i], ppgtt->base.dev);
	}

	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
	unmap_and_free_pd(&ppgtt->pd);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);

	drm_mm_remove_node(&ppgtt->node);

	gen6_ppgtt_free(ppgtt);
}

static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool retried = false;
	int ret;

	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
	ppgtt->scratch_pt = alloc_pt_single(ppgtt->base.dev);
	if (IS_ERR(ppgtt->scratch_pt))
		return PTR_ERR(ppgtt->scratch_pt);

	gen6_initialize_pt(&ppgtt->base, ppgtt->scratch_pt);

alloc:
	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
						  &ppgtt->node, GEN6_PD_SIZE,
						  GEN6_PD_ALIGN, 0,
						  0, dev_priv->gtt.base.total,
						  DRM_MM_TOPDOWN);
	if (ret == -ENOSPC && !retried) {
		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
					       I915_CACHE_NONE,
					       0, dev_priv->gtt.base.total,
					       0);
		if (ret)
			goto err_out;

		retried = true;
		goto alloc;
	}

	if (ret)
		goto err_out;


	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
		DRM_DEBUG("Forced to use aperture for PDEs\n");

	ppgtt->num_pd_entries = I915_PDES;
	return 0;

err_out:
	unmap_and_free_pt(ppgtt->scratch_pt, ppgtt->base.dev);
	return ret;
}

static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
	return gen6_ppgtt_allocate_page_directories(ppgtt);
}

static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
				  uint64_t start, uint64_t length)
{
	struct i915_page_table_entry *unused;
	uint32_t pde, temp;

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, temp, pde)
		ppgtt->pd.page_table[pde] = ppgtt->scratch_pt;
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt, bool aliasing)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
	if (IS_GEN6(dev)) {
		ppgtt->switch_mm = gen6_mm_switch;
	} else if (IS_HASWELL(dev)) {
		ppgtt->switch_mm = hsw_mm_switch;
	} else if (IS_GEN7(dev)) {
		ppgtt->switch_mm = gen7_mm_switch;
	} else
		BUG();

	if (intel_vgpu_active(dev))
		ppgtt->switch_mm = vgpu_mm_switch;

	ret = gen6_ppgtt_alloc(ppgtt);
	if (ret)
		return ret;

	if (aliasing) {
		/* preallocate all pts */
		ret = alloc_pt_range(&ppgtt->pd, 0, ppgtt->num_pd_entries,
				ppgtt->base.dev);

		if (ret) {
			gen6_ppgtt_cleanup(&ppgtt->base);
			return ret;
		}
	}

	ppgtt->base.allocate_va_range = gen6_alloc_va_range;
	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = ppgtt->num_pd_entries * GEN6_PTES * PAGE_SIZE;
	ppgtt->debug_dump = gen6_dump_ppgtt;

	ppgtt->pd.pd_offset =
		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);

	ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
		ppgtt->pd.pd_offset / sizeof(gen6_pte_t);

	if (aliasing)
		ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
	else
		gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);

	gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);

	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
			 ppgtt->node.size >> 20,
			 ppgtt->node.start / PAGE_SIZE);

	DRM_DEBUG("Adding PPGTT at offset %x\n",
		  ppgtt->pd.pd_offset << 10);

	return 0;
}

static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt,
		bool aliasing)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ppgtt->base.dev = dev;
	ppgtt->base.scratch = dev_priv->gtt.base.scratch;

	if (INTEL_INFO(dev)->gen < 8)
		return gen6_ppgtt_init(ppgtt, aliasing);
	else
		return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
}
int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	ret = __hw_ppgtt_init(dev, ppgtt, false);
	if (ret == 0) {
		kref_init(&ppgtt->ref);
		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
			    ppgtt->base.total);
		i915_init_vm(dev_priv, &ppgtt->base);
	}

	return ret;
}

int i915_ppgtt_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i, ret = 0;

	/* In the case of execlists, PPGTT is enabled by the context descriptor
	 * and the PDPs are contained within the context itself.  We don't
	 * need to do anything here. */
	if (i915.enable_execlists)
		return 0;

	if (!USES_PPGTT(dev))
		return 0;

	if (IS_GEN6(dev))
		gen6_ppgtt_enable(dev);
	else if (IS_GEN7(dev))
		gen7_ppgtt_enable(dev);
	else if (INTEL_INFO(dev)->gen >= 8)
		gen8_ppgtt_enable(dev);
	else
		MISSING_CASE(INTEL_INFO(dev)->gen);

	if (ppgtt) {
		for_each_ring(ring, dev_priv, i) {
			ret = ppgtt->switch_mm(ppgtt, ring);
			if (ret != 0)
				return ret;
		}
	}

	return ret;
}
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = i915_ppgtt_init(dev, ppgtt);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	ppgtt->file_priv = fpriv;

	trace_i915_ppgtt_create(&ppgtt->base);

	return ppgtt;
}
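
/*
 * Lifetime sketch (hypothetical caller, not driver code): a PPGTT returned by
 * i915_ppgtt_create() is reference counted through ppgtt->ref, so creation is
 * eventually balanced by a kref_put() that funnels into i915_ppgtt_release()
 * below:
 *
 *	ppgtt = i915_ppgtt_create(dev, file_priv);
 *	if (IS_ERR(ppgtt))
 *		return PTR_ERR(ppgtt);
 *	...
 *	kref_put(&ppgtt->ref, i915_ppgtt_release);
 */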

void  i915_ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	trace_i915_ppgtt_release(&ppgtt->base);

	/* vmas should already be unbound */
	WARN_ON(!list_empty(&ppgtt->base.active_list));
	WARN_ON(!list_empty(&ppgtt->base.inactive_list));

	list_del(&ppgtt->base.global_link);
	drm_mm_takedown(&ppgtt->base.mm);

	ppgtt->base.cleanup(&ppgtt->base);
	kfree(ppgtt);
}

static void
ppgtt_bind_vma(struct i915_vma *vma,
	       enum i915_cache_level cache_level,
	       u32 flags)
{
	/* Currently applicable only to VLV */
	if (vma->obj->gt_ro)
		flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
				cache_level, flags);
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm,
			     vma->node.start,
			     vma->obj->base.size,
			     true);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}
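
/*
 * Pairing sketch (hypothetical, not driver code): GTT unmap paths bracket the
 * unmap with the two helpers above so the VT-d idle workaround only kicks in
 * where needs_idle_maps() says it is required:
 *
 *	bool interruptible = do_idling(dev_priv);
 *	... clear the GTT range / unmap the pages ...
 *	undo_idling(dev_priv, interruptible);
 */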

void i915_check_and_clear_faults(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;

	if (INTEL_INFO(dev)->gen < 6)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 fault_reg;
		fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (fault_reg & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault_reg & PAGE_MASK,
					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault_reg),
					 RING_FAULT_FAULT_TYPE(fault_reg));
			I915_WRITE(RING_FAULT_REG(ring),
				   fault_reg & ~RING_FAULT_VALID);
		}
	}
	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
}

static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv->dev)->gen < 6) {
		intel_gtt_chipset_flush();
	} else {
		I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
		POSTING_READ(GFX_FLSH_CNTL_GEN6);
	}
}

void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Don't bother messing with faults pre GEN6 as we have little
	 * documentation supporting that it's a good idea.
	 */
	if (INTEL_INFO(dev)->gen < 6)
		return;

	i915_check_and_clear_faults(dev);

	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start,
				       dev_priv->gtt.base.total,
				       true);

	i915_ggtt_flush(dev_priv);
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	i915_check_and_clear_faults(dev);

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start,
				       dev_priv->gtt.base.total,
				       true);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
							   &dev_priv->gtt.base);
		if (!vma)
			continue;

		i915_gem_clflush_object(obj, obj->pin_display);
		/* The bind_vma code tries to be smart about tracking mappings.
		 * Unfortunately above, we've just wiped out the mappings
		 * without telling our object about it. So we need to fake it.
		 *
		 * Bind is not expected to fail since this is only called on
		 * resume and the assumption is that all requirements exist already.
		 */
		vma->bound &= ~GLOBAL_BIND;
		WARN_ON(i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND));
	}


	if (INTEL_INFO(dev)->gen >= 8) {
		if (IS_CHERRYVIEW(dev))
			chv_setup_private_ppat(dev_priv);
		else
			bdw_setup_private_ppat(dev_priv);

		return;
	}

	if (USES_PPGTT(dev)) {
		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			/* TODO: Perhaps it shouldn't be gen6 specific */

			struct i915_hw_ppgtt *ppgtt =
					container_of(vm, struct i915_hw_ppgtt,
						     base);

			if (i915_is_ggtt(vm))
				ppgtt = dev_priv->mm.aliasing_ppgtt;

			gen6_write_page_range(dev_priv, &ppgtt->pd,
					      0, ppgtt->base.total);
		}
	}

	i915_ggtt_flush(dev_priv);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

static inline void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
#ifdef writeq
	writeq(pte, addr);
#else
	iowrite32((u32)pte, addr);
	iowrite32(pte >> 32, addr + 4);
#endif
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 unused)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	gen8_pte_t __iomem *gtt_entries =
		(gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr = 0; /* shut up gcc */

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_dma_address(sg_iter.sg) +
			(sg_iter.sg_pgoffset << PAGE_SHIFT);
		gen8_set_pte(&gtt_entries[i],
			     gen8_pte_encode(addr, level, true));
		i++;
	}

	/*
	 * XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readq(&gtt_entries[i-1])
			!= gen8_pte_encode(addr, level, true));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 flags)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	gen6_pte_t __iomem *gtt_entries =
		(gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr = 0;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR, they may be subject to
	 * different (e.g. NUMA-like) access patterns. Therefore, even with the
	 * way we assume the hardware should work, we keep this posting read
	 * for paranoia.
	 */
	if (i != 0) {
		unsigned long gtt = readl(&gtt_entries[i-1]);
		WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
	}

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen8_pte_t scratch_pte, __iomem *gtt_base =
		(gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = gen8_pte_encode(vm->scratch.addr,
				      I915_CACHE_LLC,
				      use_scratch);
	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
	readl(gtt_base);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}

static void i915_ggtt_bind_vma(struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 unused)
{
	const unsigned long entry = vma->node.start >> PAGE_SHIFT;
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	BUG_ON(!i915_is_ggtt(vma->vm));
	intel_gtt_insert_sg_entries(vma->ggtt_view.pages, entry, flags);
	vma->bound = GLOBAL_BIND;
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool unused)
{
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	intel_gtt_clear_range(first_entry, num_entries);
}

static void i915_ggtt_unbind_vma(struct i915_vma *vma)
{
	const unsigned int first = vma->node.start >> PAGE_SHIFT;
	const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;

	BUG_ON(!i915_is_ggtt(vma->vm));
	vma->bound = 0;
	intel_gtt_clear_range(first, size);
}

static void ggtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 flags)
{
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = vma->obj;
	struct sg_table *pages = obj->pages;

	/* Currently applicable only to VLV */
	if (obj->gt_ro)
		flags |= PTE_READ_ONLY;

	if (i915_is_ggtt(vma->vm))
		pages = vma->ggtt_view.pages;

	/* If there is no aliasing PPGTT, or the caller needs a global mapping,
	 * or we have a global mapping already but the cacheability flags have
	 * changed, set the global PTEs.
	 *
	 * If there is an aliasing PPGTT it is anecdotally faster, so use that
	 * instead if none of the above hold true.
	 *
	 * NB: A global mapping should only be needed for special regions like
	 * "gtt mappable", SNB errata, or if specified via special execbuf
	 * flags. At all other times, the GPU will use the aliasing PPGTT.
	 */
	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
		if (!(vma->bound & GLOBAL_BIND) ||
		    (cache_level != obj->cache_level)) {
			vma->vm->insert_entries(vma->vm, pages,
						vma->node.start,
						cache_level, flags);
			vma->bound |= GLOBAL_BIND;
		}
	}

	if (dev_priv->mm.aliasing_ppgtt &&
	    (!(vma->bound & LOCAL_BIND) ||
	     (cache_level != obj->cache_level))) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
		appgtt->base.insert_entries(&appgtt->base, pages,
					    vma->node.start,
					    cache_level, flags);
		vma->bound |= LOCAL_BIND;
	}
}

static void ggtt_unbind_vma(struct i915_vma *vma)
{
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = vma->obj;

	if (vma->bound & GLOBAL_BIND) {
		vma->vm->clear_range(vma->vm,
				     vma->node.start,
				     obj->base.size,
				     true);
		vma->bound &= ~GLOBAL_BIND;
	}

	if (vma->bound & LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
		appgtt->base.clear_range(&appgtt->base,
					 vma->node.start,
					 obj->base.size,
					 true);
		vma->bound &= ~LOCAL_BIND;
	}
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  u64 *start,
				  u64 *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}
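
/*
 * A sketch of the colouring rule above, assuming the usual 4KiB GGTT page:
 * when the node in front of a hole has a different cache "colour", the
 * usable start of the hole is pushed up by one page; when the allocated node
 * after the hole differs in colour, the usable end is pulled back by one
 * page. The net effect is a one-page guard between neighbouring objects of
 * differing cacheability. This hook is only installed on !HAS_LLC platforms
 * (see i915_gem_setup_global_gtt() below).
 */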

static int i915_gem_setup_global_gtt(struct drm_device *dev,
				     unsigned long start,
				     unsigned long mappable_end,
				     unsigned long end)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;
	int ret;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);

	dev_priv->gtt.base.start = start;
	dev_priv->gtt.base.total = end - start;

	if (intel_vgpu_active(dev)) {
		ret = intel_vgt_balloon(dev);
		if (ret)
			return ret;
	}

	if (!HAS_LLC(dev))
		dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);

		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
			      i915_gem_obj_ggtt_offset(obj), obj->base.size);

		WARN_ON(i915_gem_obj_ggtt_bound(obj));
		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
			return ret;
		}
		vma->bound |= GLOBAL_BIND;
	}

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt_vm->clear_range(ggtt_vm, hole_start,
				     hole_end - hole_start, true);
	}

	/* And finally clear the reserved guard page */
	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);

	if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt;

		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
		if (!ppgtt)
			return -ENOMEM;

		ret = __hw_ppgtt_init(dev, ppgtt, true);
		if (ret) {
			kfree(ppgtt);
			return ret;
		}

		dev_priv->mm.aliasing_ppgtt = ppgtt;
	}

	return 0;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.base.total;
	mappable_size = dev_priv->gtt.mappable_end;

	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

void i915_global_gtt_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;

	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		ppgtt->base.cleanup(&ppgtt->base);
	}

	if (drm_mm_initialized(&vm->mm)) {
		if (intel_vgpu_active(dev))
			intel_vgt_deballoon();

		drm_mm_takedown(&vm->mm);
		list_del(&vm->global_link);
	}

	vm->cleanup(vm);
}

static int setup_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr))
		return -EINVAL;
#else
	dma_addr = page_to_phys(page);
#endif
	dev_priv->gtt.base.scratch.page = page;
	dev_priv->gtt.base.scratch.addr = dma_addr;

	return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page = dev_priv->gtt.base.scratch.page;

	set_pages_wb(page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(page);
}

static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}
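
/*
 * Worked example for the decode above (a sketch, the actual field value is
 * platform dependent): a GGMS field of 2 yields 2 << 20 = 2MiB of PTE space
 * in the GSM; with 4-byte gen6 PTEs that is 512K entries, which
 * gen6_gmch_probe() scales by PAGE_SIZE to roughly 2GiB of GGTT.
 */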

static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}
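
/*
 * Worked example (a sketch): a BDW-style GGMS field of 3 decodes to
 * 1 << 3 = 8, i.e. 8MiB of PTEs; with 8-byte gen8 PTEs that is 1M entries,
 * which gen8_gmch_probe() scales by PAGE_SIZE to about 4GiB of GGTT.
 */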

static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}
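
/*
 * For instance (a sketch): a CHV GGMS value of 2 decodes to
 * 1 << (20 + 2) = 4MiB of PTE space, while a value of 0 simply reports
 * no GTT space at all.
 */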

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}
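
/*
 * For example (a sketch): a GMS field of 2 decodes to 2 << 25 = 64MB of
 * stolen memory, consistent with the 32MB granularity noted above.
 */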

static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
	return bdw_gmch_ctl << 25; /* 32 MB units */
}

static size_t chv_get_stolen_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;

	/*
	 * 0x0  to 0x10: 32MB increments starting at 0MB
	 * 0x11 to 0x16: 4MB increments starting at 8MB
	 * 0x17 to 0x1d: 4MB increments starting at 36MB
	 */
	if (gmch_ctrl < 0x11)
		return gmch_ctrl << 25;
	else if (gmch_ctrl < 0x17)
		return (gmch_ctrl - 0x11 + 2) << 22;
	else
		return (gmch_ctrl - 0x17 + 9) << 22;
}
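
/*
 * A few decoded values as a sanity check (a sketch): 0x10 -> 16 * 32MB =
 * 512MB, 0x11 -> (0x11 - 0x11 + 2) << 22 = 8MB, and 0x17 ->
 * (0x17 - 0x17 + 9) << 22 = 36MB, matching the ranges documented above.
 */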

static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
{
	gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;

	if (gen9_gmch_ctl < 0xf0)
		return gen9_gmch_ctl << 25; /* 32 MB units */
	else
		/* 4MB increments starting at 0xf0 for 4MB */
		return (gen9_gmch_ctl - 0xf0 + 1) << 22;
}
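
/*
 * For example (a sketch): 0x02 decodes to 2 << 25 = 64MB (32MB units),
 * while 0xf1 decodes to (0xf1 - 0xf0 + 1) << 22 = 8MB via the 4MB encoding.
 */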

static int ggtt_probe_common(struct drm_device *dev,
			     size_t gtt_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_phys_addr;
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret) {
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(dev_priv->gtt.gsm);
	}

	return ret;
}

/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases. */
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	if (!USES_PPGTT(dev_priv->dev))
		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
		 * so RTL will always use the value corresponding to
		 * pat_sel = 000".
		 * So let's disable cache for GGTT to avoid screen corruptions.
		 * MOCS still can be used though.
		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
		 * before this patch, i.e. the same uncached + snooping access
		 * like on gen6/7 seems to be in effect.
		 * - So this just fixes blitter/render access. Again it looks
		 * like it's not just uncached access, but uncached + snooping.
		 * So we can still hold onto all our assumptions wrt cpu
		 * clflushing on LLC machines.
		 */
		pat = GEN8_PPAT(0, GEN8_PPAT_UC);

	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
	 * write would work. */
	I915_WRITE(GEN8_PRIVATE_PAT, pat);
	I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
}

static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */
	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	I915_WRITE(GEN8_PRIVATE_PAT, pat);
	I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
}

static int gen8_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));

	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	if (INTEL_INFO(dev)->gen >= 9) {
		*stolen = gen9_get_stolen_size(snb_gmch_ctl);
		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
	} else if (IS_CHERRYVIEW(dev)) {
		*stolen = chv_get_stolen_size(snb_gmch_ctl);
		gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
	} else {
		*stolen = gen8_get_stolen_size(snb_gmch_ctl);
		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
	}

	*gtt_total = (gtt_size / sizeof(gen8_pte_t)) << PAGE_SHIFT;

	if (IS_CHERRYVIEW(dev))
		chv_setup_private_ppat(dev_priv);
	else
		bdw_setup_private_ppat(dev_priv);

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;

	return ret;
}

static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	*stolen = gen6_get_stolen_size(snb_gmch_ctl);

	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
	*gtt_total = (gtt_size / sizeof(gen6_pte_t)) << PAGE_SHIFT;

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;

	return ret;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{

	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);

	iounmap(gtt->gsm);
	teardown_scratch_page(vm->dev);
}

static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;

	if (unlikely(dev_priv->gtt.do_idle_maps))
		DRM_INFO("applying Ironlake quirks for intel_iommu\n");

	return 0;
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		gtt->gtt_probe = i915_gmch_probe;
		gtt->base.cleanup = i915_gmch_remove;
	} else if (INTEL_INFO(dev)->gen < 8) {
		gtt->gtt_probe = gen6_gmch_probe;
		gtt->base.cleanup = gen6_gmch_remove;
		if (IS_HASWELL(dev) && dev_priv->ellc_size)
			gtt->base.pte_encode = iris_pte_encode;
		else if (IS_HASWELL(dev))
			gtt->base.pte_encode = hsw_pte_encode;
		else if (IS_VALLEYVIEW(dev))
			gtt->base.pte_encode = byt_pte_encode;
		else if (INTEL_INFO(dev)->gen >= 7)
			gtt->base.pte_encode = ivb_pte_encode;
		else
			gtt->base.pte_encode = snb_pte_encode;
	} else {
		dev_priv->gtt.gtt_probe = gen8_gmch_probe;
		dev_priv->gtt.base.cleanup = gen6_gmch_remove;
	}

	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
			     &gtt->mappable_base, &gtt->mappable_end);
	if (ret)
		return ret;

	gtt->base.dev = dev;

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 gtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		DRM_INFO("VT-d active for gfx access\n");
#endif
	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities.  We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

	return 0;
}

static struct i915_vma *
__i915_gem_vma_create(struct drm_i915_gem_object *obj,
		      struct i915_address_space *vm,
		      const struct i915_ggtt_view *ggtt_view)
{
	struct i915_vma *vma;

	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
		return ERR_PTR(-EINVAL);
	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->vma_link);
	INIT_LIST_HEAD(&vma->mm_list);
	INIT_LIST_HEAD(&vma->exec_list);
	vma->vm = vm;
	vma->obj = obj;

	if (INTEL_INFO(vm->dev)->gen >= 6) {
		if (i915_is_ggtt(vm)) {
			vma->ggtt_view = *ggtt_view;

			vma->unbind_vma = ggtt_unbind_vma;
			vma->bind_vma = ggtt_bind_vma;
		} else {
			vma->unbind_vma = ppgtt_unbind_vma;
			vma->bind_vma = ppgtt_bind_vma;
		}
	} else {
		BUG_ON(!i915_is_ggtt(vm));
		vma->ggtt_view = *ggtt_view;
		vma->unbind_vma = i915_ggtt_unbind_vma;
		vma->bind_vma = i915_ggtt_bind_vma;
	}

	list_add_tail(&vma->vma_link, &obj->vma_list);
	if (!i915_is_ggtt(vm))
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));

	return vma;
}

struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm)
{
	struct i915_vma *vma;

	vma = i915_gem_obj_to_vma(obj, vm);
	if (!vma)
		vma = __i915_gem_vma_create(obj, vm,
					    i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);

	return vma;
}

struct i915_vma *
i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
				       const struct i915_ggtt_view *view)
{
	struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
	struct i915_vma *vma;

	if (WARN_ON(!view))
		return ERR_PTR(-EINVAL);

	vma = i915_gem_obj_to_ggtt_view(obj, view);

	if (IS_ERR(vma))
		return vma;

	if (!vma)
		vma = __i915_gem_vma_create(obj, ggtt, view);

	return vma;

}

static void
rotate_pages(dma_addr_t *in, unsigned int width, unsigned int height,
	     struct sg_table *st)
{
	unsigned int column, row;
	unsigned int src_idx;
	struct scatterlist *sg = st->sgl;

	st->nents = 0;

	for (column = 0; column < width; column++) {
		src_idx = width * (height - 1) + column;
		for (row = 0; row < height; row++) {
			st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * All we need are the DMA addresses.
			 */
			sg_set_page(sg, NULL, PAGE_SIZE, 0);
			sg_dma_address(sg) = in[src_idx];
			sg_dma_len(sg) = PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= width;
		}
	}
}
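
/*
 * A tiny worked example of the walk above (a sketch): for a 2x2-page object
 * whose linear page order is
 *     0 1
 *     2 3
 * rotate_pages() emits the source pages as 2, 0, 3, 1 - each column is
 * walked bottom-to-top before moving on to the next column.
 */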

static struct sg_table *
intel_rotate_fb_obj_pages(struct i915_ggtt_view *ggtt_view,
			  struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct intel_rotation_info *rot_info = &ggtt_view->rotation_info;
	unsigned long size, pages, rot_pages;
	struct sg_page_iter sg_iter;
	unsigned long i;
	dma_addr_t *page_addr_list;
	struct sg_table *st;
	unsigned int tile_pitch, tile_height;
	unsigned int width_pages, height_pages;
	int ret = -ENOMEM;

	pages = obj->base.size / PAGE_SIZE;

	/* Calculate tiling geometry. */
	tile_height = intel_tile_height(dev, rot_info->pixel_format,
					rot_info->fb_modifier);
	tile_pitch = PAGE_SIZE / tile_height;
	width_pages = DIV_ROUND_UP(rot_info->pitch, tile_pitch);
	height_pages = DIV_ROUND_UP(rot_info->height, tile_height);
	rot_pages = width_pages * height_pages;
	size = rot_pages * PAGE_SIZE;

	/* Allocate a temporary list of source pages for random access. */
	page_addr_list = drm_malloc_ab(pages, sizeof(dma_addr_t));
	if (!page_addr_list)
		return ERR_PTR(ret);

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, rot_pages, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	/* Populate source page list from the object. */
	i = 0;
	for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
		page_addr_list[i] = sg_page_iter_dma_address(&sg_iter);
		i++;
	}

	/* Rotate the pages. */
	rotate_pages(page_addr_list, width_pages, height_pages, st);

	DRM_DEBUG_KMS(
		      "Created rotated page mapping for object size %lu (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages).\n",
		      size, rot_info->pitch, rot_info->height,
		      rot_info->pixel_format, width_pages, height_pages,
		      rot_pages);

	drm_free_large(page_addr_list);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	drm_free_large(page_addr_list);

	DRM_DEBUG_KMS(
		      "Failed to create rotated mapping for object size %lu! (%d) (pitch=%u, height=%u, pixel_format=0x%x, %ux%u tiles, %lu pages)\n",
		      size, ret, rot_info->pitch, rot_info->height,
		      rot_info->pixel_format, width_pages, height_pages,
		      rot_pages);
	return ERR_PTR(ret);
}

static inline int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret = 0;

	if (vma->ggtt_view.pages)
		return 0;

	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
		vma->ggtt_view.pages = vma->obj->pages;
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		vma->ggtt_view.pages =
			intel_rotate_fb_obj_pages(&vma->ggtt_view, vma->obj);
	else
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);

	if (!vma->ggtt_view.pages) {
		DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
			  vma->ggtt_view.type);
		ret = -EINVAL;
	} else if (IS_ERR(vma->ggtt_view.pages)) {
		ret = PTR_ERR(vma->ggtt_view.pages);
		vma->ggtt_view.pages = NULL;
		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
			  vma->ggtt_view.type, ret);
	}

	return ret;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	if (i915_is_ggtt(vma->vm)) {
		int ret = i915_get_ggtt_vma_pages(vma);

		if (ret)
			return ret;
	}

	vma->bind_vma(vma, cache_level, flags);

	return 0;
}