/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/seq_file.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view are
 * different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _view suffix. They take the struct i915_ggtt_view parameter
 * encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, i.e. the ones not taking the view parameter, operate
 * on the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * The core API is designed to have copy semantics, which means that the passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

const struct i915_ggtt_view i915_ggtt_view_normal;

static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);

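/*
 * Sanitize the i915.enable_ppgtt module option against what the hardware and
 * the current submission mode can actually support: 0 disables PPGTT, 1
 * selects aliasing PPGTT and 2 selects full PPGTT.
 */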
static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;

	has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
	has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;

	if (intel_vgpu_active(dev))
		has_full_ppgtt = false; /* emulation is too hard */

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (enable_ppgtt == 0 || !has_aliasing_ppgtt))
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}
#endif

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
	    dev->pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_INFO(dev)->gen >= 8 && i915.enable_execlists)
		return 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

static void ppgtt_bind_vma(struct i915_vma *vma,
			   enum i915_cache_level cache_level,
			   u32 flags);
static void ppgtt_unbind_vma(struct i915_vma *vma);

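/*
 * Encode a gen8 PTE: the valid bit becomes _PAGE_PRESENT | _PAGE_RW and the
 * cache level selects one of the private PAT (PPAT) indices programmed by
 * bdw_setup_private_ppat()/chv_setup_private_ppat().
 */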
static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
					     enum i915_cache_level level,
					     bool valid)
{
	gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
					     dma_addr_t addr,
					     enum i915_cache_level level)
{
	gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid, u32 unused)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid, u32 unused)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid, u32 flags)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid, u32 unused)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
				      enum i915_cache_level level,
				      bool valid, u32 unused)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

/* Broadwell Page Directory Pointer Descriptors */
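/*
 * Each ring has four PDP registers; gen8_write_pdp() loads one of them from
 * the ring itself, emitting the 64-bit page directory address as two
 * MI_LOAD_REGISTER_IMM writes (upper dword, then lower dword).
 */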
static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
			   uint64_t val)
{
	int ret;

	BUG_ON(entry >= 4);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
	intel_ring_emit(ring, (u32)(val >> 32));
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
	intel_ring_emit(ring, (u32)(val));
	intel_ring_advance(ring);

	return 0;
}

static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_engine_cs *ring)
{
	int i, ret;

	/* bit of a hack to find the actual last used pd */
	int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;

	for (i = used_pd - 1; i >= 0; i--) {
		dma_addr_t addr = ppgtt->pd_dma_addr[i];
		ret = gen8_write_pdp(ring, i, addr);
		if (ret)
			return ret;
	}

	return 0;
}

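/*
 * Point every PTE in the given range at the scratch page. The 64-bit offset
 * is decomposed into PDPE/PDE/PTE indices and each affected page table is
 * kmapped and rewritten in turn, with a clflush on !LLC platforms.
 */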
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned last_pte, i;

	scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
				      I915_CACHE_LLC, use_scratch);

	while (num_entries) {
		struct i915_page_directory_entry *pd = &ppgtt->pdp.page_directory[pdpe];
		struct page *page_table = pd->page_table[pde].page;

		last_pte = pte + num_entries;
		if (last_pte > GEN8_PTES_PER_PAGE)
			last_pte = GEN8_PTES_PER_PAGE;

		pt_vaddr = kmap_atomic(page_table);

		for (i = pte; i < last_pte; i++) {
			pt_vaddr[i] = scratch_pte;
			num_entries--;
		}

		if (!HAS_LLC(ppgtt->base.dev))
			drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
		kunmap_atomic(pt_vaddr);

		pte = 0;
		if (++pde == GEN8_PDES_PER_PAGE) {
			pdpe++;
			pde = 0;
		}
	}
}

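/*
 * Write a PTE for every page of the scatterlist, starting at @start. Only one
 * page table is kmapped at a time; it is flushed (on !LLC platforms) and
 * unmapped before moving on to the next one.
 */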
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level, u32 unused)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen8_gtt_pte_t *pt_vaddr;
	unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
	unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
	unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
	struct sg_page_iter sg_iter;

	pt_vaddr = NULL;

	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		if (WARN_ON(pdpe >= GEN8_LEGACY_PDPES))
			break;

		if (pt_vaddr == NULL) {
			struct i915_page_directory_entry *pd = &ppgtt->pdp.page_directory[pdpe];
			struct page *page_table = pd->page_table[pde].page;

			pt_vaddr = kmap_atomic(page_table);
		}

		pt_vaddr[pte] =
			gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
					cache_level, true);
		if (++pte == GEN8_PTES_PER_PAGE) {
			if (!HAS_LLC(ppgtt->base.dev))
				drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
			kunmap_atomic(pt_vaddr);
			pt_vaddr = NULL;
			if (++pde == GEN8_PDES_PER_PAGE) {
				pdpe++;
				pde = 0;
			}
			pte = 0;
		}
	}
	if (pt_vaddr) {
		if (!HAS_LLC(ppgtt->base.dev))
			drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
		kunmap_atomic(pt_vaddr);
	}
}

static void gen8_free_page_tables(struct i915_page_directory_entry *pd)
{
	int i;

	if (pd->page_table == NULL)
		return;

	for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
		if (pd->page_table[i].page)
			__free_page(pd->page_table[i].page);
}

static void gen8_free_page_directory(struct i915_page_directory_entry *pd)
{
	gen8_free_page_tables(pd);
	kfree(pd->page_table);
	__free_page(pd->page);
}

static void gen8_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		gen8_free_page_directory(&ppgtt->pdp.page_directory[i]);
		kfree(ppgtt->gen8_pt_dma_addr[i]);
	}
}

static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
	struct pci_dev *hwdev = ppgtt->base.dev->pdev;
	int i, j;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		/* TODO: In the future we'll support sparse mappings, so this
		 * will have to change. */
		if (!ppgtt->pd_dma_addr[i])
			continue;

		pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
			       PCI_DMA_BIDIRECTIONAL);

		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
			if (addr)
				pci_unmap_page(hwdev, addr, PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
		}
	}
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);

	gen8_ppgtt_unmap_pages(ppgtt);
	gen8_ppgtt_free(ppgtt);
}

static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
						     sizeof(dma_addr_t),
						     GFP_KERNEL);
		if (!ppgtt->gen8_pt_dma_addr[i])
			return -ENOMEM;
	}

	return 0;
}

static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
	int i, j;

	for (i = 0; i < ppgtt->num_pd_pages; i++) {
		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			struct i915_page_table_entry *pt = &ppgtt->pdp.page_directory[i].page_table[j];

			pt->page = alloc_page(GFP_KERNEL | __GFP_ZERO);
			if (!pt->page)
				goto unwind_out;
		}
	}

	return 0;

unwind_out:
	while (i--)
		gen8_free_page_tables(&ppgtt->pdp.page_directory[i]);

	return -ENOMEM;
}

static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
						const int max_pdp)
{
	int i;

	for (i = 0; i < max_pdp; i++) {
		struct i915_page_table_entry *pt;

		pt = kcalloc(GEN8_PDES_PER_PAGE, sizeof(*pt), GFP_KERNEL);
		if (!pt)
			goto unwind_out;

		ppgtt->pdp.page_directory[i].page = alloc_page(GFP_KERNEL);
		if (!ppgtt->pdp.page_directory[i].page) {
			kfree(pt);
			goto unwind_out;
		}

		ppgtt->pdp.page_directory[i].page_table = pt;
	}

	ppgtt->num_pd_pages = max_pdp;
	BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPES);

	return 0;

unwind_out:
	while (i--) {
		kfree(ppgtt->pdp.page_directory[i].page_table);
		__free_page(ppgtt->pdp.page_directory[i].page);
	}

	return -ENOMEM;
}

static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
			    const int max_pdp)
{
	int ret;

	ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
	if (ret)
		return ret;

	ret = gen8_ppgtt_allocate_page_tables(ppgtt);
	if (ret)
		goto err_out;

	ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;

	ret = gen8_ppgtt_allocate_dma(ppgtt);
	if (ret)
		goto err_out;

	return 0;

err_out:
	gen8_ppgtt_free(ppgtt);
	return ret;
}

static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
					     const int pd)
{
	dma_addr_t pd_addr;
	int ret;

	pd_addr = pci_map_page(ppgtt->base.dev->pdev,
			       ppgtt->pdp.page_directory[pd].page, 0,
			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
	if (ret)
		return ret;

	ppgtt->pd_dma_addr[pd] = pd_addr;

	return 0;
}

static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
					const int pd,
					const int pt)
{
	dma_addr_t pt_addr;
	struct page *p;
	int ret;

	p = ppgtt->pdp.page_directory[pd].page_table[pt].page;
	pt_addr = pci_map_page(ppgtt->base.dev->pdev,
			       p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
	if (ret)
		return ret;

	ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;

	return 0;
}

/**
 * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP
 * registers, with a net effect resembling a 2-level page table in normal x86
 * terms. Each PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of
 * legacy 32b address space.
 *
 * FIXME: split allocation into smaller pieces. For now we only ever do this
 * once, but with full PPGTT, the multiple contiguous allocations will be bad.
 * TODO: Do something with the size parameter
 */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
{
	const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
	const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
	int i, j, ret;

	if (size % (1<<30))
		DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);

	/* 1. Do all our allocations for page directories and page tables. */
	ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
	if (ret)
		return ret;

	/*
	 * 2. Create DMA mappings for the page directories and page tables.
	 */
	for (i = 0; i < max_pdp; i++) {
		ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
		if (ret)
			goto bail;

		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
			if (ret)
				goto bail;
		}
	}

	/*
	 * 3. Map all the page directory entries to point to the page tables
	 * we've allocated.
	 *
	 * For now, the PPGTT helper functions all require that the PDEs are
	 * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
	 * will never need to touch the PDEs again.
	 */
	for (i = 0; i < max_pdp; i++) {
		gen8_ppgtt_pde_t *pd_vaddr;
		pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i].page);
		for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
			dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
			pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
						      I915_CACHE_LLC);
		}
		if (!HAS_LLC(ppgtt->base.dev))
			drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
		kunmap_atomic(pd_vaddr);
	}

	ppgtt->switch_mm = gen8_mm_switch;
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;

	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);

	DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
			 ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
	DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
			 ppgtt->num_pd_entries,
			 (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
	return 0;

bail:
	gen8_ppgtt_unmap_pages(ppgtt);
	gen8_ppgtt_free(ppgtt);
	return ret;
}

static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
	struct i915_address_space *vm = &ppgtt->base;
	gen6_gtt_pte_t __iomem *pd_addr;
	gen6_gtt_pte_t scratch_pte;
	uint32_t pd_entry;
	int pte, pde;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);

	pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);

	seq_printf(m, "  VM %p (pd_offset %x-%x):\n", vm,
		   ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
	for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
		u32 expected;
		gen6_gtt_pte_t *pt_vaddr;
		dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
		pd_entry = readl(pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

B
B
			unsigned long va =
				(pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, "  SCRATCH ");
			}
			seq_puts(m, "\n");
		}
		kunmap_atomic(pt_vaddr);
	}
}

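/*
 * Write all PDEs of a gen6/7 PPGTT into its page directory, which lives in
 * the GGTT and is accessed through the CPU mapping of the GSM at pd_offset.
 */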
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	WARN_ON(ppgtt->pd_offset & 0x3f);
	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);
}

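/*
 * The page directory offset is programmed into the upper 16 bits of
 * RING_PP_DIR_BASE in 64-byte units, hence the divide and shift below.
 */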
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
	BUG_ON(ppgtt->pd_offset & 0x3f);

	return (ppgtt->pd_offset / 64) << 16;
}

static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct intel_engine_cs *ring)
{
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int vgpu_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_engine_cs *ring)
{
	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);

	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
	return 0;
}

static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_engine_cs *ring)
{
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	/* XXX: RCS is the only one to auto invalidate the TLBs? */
	if (ring->id != RCS) {
		ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct intel_engine_cs *ring)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));

	POSTING_READ(RING_PP_DIR_DCLV(ring));

	return 0;
}

static void gen8_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int j;

	for_each_ring(ring, dev_priv, j) {
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static void gen7_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	uint32_t ecochk, ecobits;
	int i;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);

	for_each_ring(ring, dev_priv, i) {
		/* GFX_MODE is per-ring on gen7+ */
		I915_WRITE(RING_MODE_GEN7(ring),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static void gen6_ppgtt_enable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ecochk, gab_ctl, ecobits;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt].page);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level, u32 flags)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

	pt_vaddr = NULL;
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		if (pt_vaddr == NULL)
			pt_vaddr = kmap_atomic(ppgtt->pd.page_table[act_pt].page);

		pt_vaddr[act_pte] =
			vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
				       cache_level, true, flags);

		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			pt_vaddr = NULL;
			act_pt++;
			act_pte = 0;
		}
	}
	if (pt_vaddr)
		kunmap_atomic(pt_vaddr);
}

static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->base.dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
}

static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		if (ppgtt->pd.page_table[i].page)
			__free_page(ppgtt->pd.page_table[i].page);
	kfree(ppgtt->pd.page_table);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);

	drm_mm_remove_node(&ppgtt->node);

	gen6_ppgtt_unmap_pages(ppgtt);
	gen6_ppgtt_free(ppgtt);
}

static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool retried = false;
	int ret;

	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
alloc:
	ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
						  &ppgtt->node, GEN6_PD_SIZE,
						  GEN6_PD_ALIGN, 0,
						  0, dev_priv->gtt.base.total,
						  DRM_MM_TOPDOWN);
	if (ret == -ENOSPC && !retried) {
		ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
					       I915_CACHE_NONE,
					       0, dev_priv->gtt.base.total,
					       0);
		if (ret)
			return ret;

		retried = true;
		goto alloc;
	}

	if (ret)
		return ret;

	if (ppgtt->node.start < dev_priv->gtt.mappable_end)
		DRM_DEBUG("Forced to use aperture for PDEs\n");

	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
	return 0;
}

static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_page_table_entry *pt;
	int i;

	pt = kcalloc(ppgtt->num_pd_entries, sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return -ENOMEM;

	ppgtt->pd.page_table = pt;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		pt[i].page = alloc_page(GFP_KERNEL);
		if (!pt[i].page) {
			gen6_ppgtt_free(ppgtt);
			return -ENOMEM;
		}
	}

	return 0;
}

static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
	int ret;

	ret = gen6_ppgtt_allocate_page_directories(ppgtt);
	if (ret)
		return ret;

	ret = gen6_ppgtt_allocate_page_tables(ppgtt);
	if (ret) {
		drm_mm_remove_node(&ppgtt->node);
		return ret;
	}

	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr) {
		drm_mm_remove_node(&ppgtt->node);
		gen6_ppgtt_free(ppgtt);
		return -ENOMEM;
	}

	return 0;
}

static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	int i;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		struct page *page;
		dma_addr_t pt_addr;

		page = ppgtt->pd.page_table[i].page;
		pt_addr = pci_map_page(dev->pdev, page, 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			gen6_ppgtt_unmap_pages(ppgtt);
			return -EIO;
		}

		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	return 0;
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
	if (IS_GEN6(dev)) {
		ppgtt->switch_mm = gen6_mm_switch;
	} else if (IS_HASWELL(dev)) {
		ppgtt->switch_mm = hsw_mm_switch;
	} else if (IS_GEN7(dev)) {
		ppgtt->switch_mm = gen7_mm_switch;
	} else
		BUG();

	if (intel_vgpu_active(dev))
		ppgtt->switch_mm = vgpu_mm_switch;

	ret = gen6_ppgtt_alloc(ppgtt);
	if (ret)
		return ret;

	ret = gen6_ppgtt_setup_page_tables(ppgtt);
	if (ret) {
		gen6_ppgtt_free(ppgtt);
		return ret;
	}

	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
	ppgtt->debug_dump = gen6_dump_ppgtt;

	ppgtt->pd_offset =
		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);

	ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);

	DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
			 ppgtt->node.size >> 20,
			 ppgtt->node.start / PAGE_SIZE);

	gen6_write_pdes(ppgtt);
	DRM_DEBUG("Adding PPGTT at offset %x\n",
		  ppgtt->pd_offset << 10);

	return 0;
}

static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	ppgtt->base.dev = dev;
	ppgtt->base.scratch = dev_priv->gtt.base.scratch;

	if (INTEL_INFO(dev)->gen < 8)
		return gen6_ppgtt_init(ppgtt);
	else
		return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
}

int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	ret = __hw_ppgtt_init(dev, ppgtt);
	if (ret == 0) {
		kref_init(&ppgtt->ref);
		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
			    ppgtt->base.total);
		i915_init_vm(dev_priv, &ppgtt->base);
	}

	return ret;
}

int i915_ppgtt_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i, ret = 0;

	/* In the case of execlists, PPGTT is enabled by the context descriptor
	 * and the PDPs are contained within the context itself.  We don't
	 * need to do anything here. */
	if (i915.enable_execlists)
		return 0;

	if (!USES_PPGTT(dev))
		return 0;

	if (IS_GEN6(dev))
		gen6_ppgtt_enable(dev);
	else if (IS_GEN7(dev))
		gen7_ppgtt_enable(dev);
	else if (INTEL_INFO(dev)->gen >= 8)
		gen8_ppgtt_enable(dev);
	else
		MISSING_CASE(INTEL_INFO(dev)->gen);

	if (ppgtt) {
		for_each_ring(ring, dev_priv, i) {
			ret = ppgtt->switch_mm(ppgtt, ring);
			if (ret != 0)
				return ret;
		}
	}

	return ret;
}
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = i915_ppgtt_init(dev, ppgtt);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	ppgtt->file_priv = fpriv;

	trace_i915_ppgtt_create(&ppgtt->base);

	return ppgtt;
}

void  i915_ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	trace_i915_ppgtt_release(&ppgtt->base);

	/* vmas should already be unbound */
	WARN_ON(!list_empty(&ppgtt->base.active_list));
	WARN_ON(!list_empty(&ppgtt->base.inactive_list));

	list_del(&ppgtt->base.global_link);
	drm_mm_takedown(&ppgtt->base.mm);

	ppgtt->base.cleanup(&ppgtt->base);
	kfree(ppgtt);
}

static void
ppgtt_bind_vma(struct i915_vma *vma,
	       enum i915_cache_level cache_level,
	       u32 flags)
{
	/* Currently applicable only to VLV */
	if (vma->obj->gt_ro)
		flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
				cache_level, flags);
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm,
			     vma->node.start,
			     vma->obj->base.size,
			     true);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

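/*
 * do_idling()/undo_idling() implement the workaround described above: idle
 * the GPU (non-interruptibly) around GTT unmaps when gtt.do_idle_maps is set,
 * then restore the previous interruptible state.
 */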
static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

void i915_check_and_clear_faults(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_engine_cs *ring;
	int i;

	if (INTEL_INFO(dev)->gen < 6)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 fault_reg;
		fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (fault_reg & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault_reg & PAGE_MASK,
					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault_reg),
					 RING_FAULT_FAULT_TYPE(fault_reg));
			I915_WRITE(RING_FAULT_REG(ring),
				   fault_reg & ~RING_FAULT_VALID);
		}
	}
	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
}

static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv->dev)->gen < 6) {
		intel_gtt_chipset_flush();
	} else {
		I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
		POSTING_READ(GFX_FLSH_CNTL_GEN6);
	}
}

void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Don't bother messing with faults pre GEN6 as we have little
	 * documentation supporting that it's a good idea.
	 */
	if (INTEL_INFO(dev)->gen < 6)
		return;

	i915_check_and_clear_faults(dev);

	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start,
				       dev_priv->gtt.base.total,
				       true);

	i915_ggtt_flush(dev_priv);
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct i915_address_space *vm;

	i915_check_and_clear_faults(dev);

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start,
				       dev_priv->gtt.base.total,
				       true);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj,
							   &dev_priv->gtt.base);
		if (!vma)
			continue;

		i915_gem_clflush_object(obj, obj->pin_display);
		/* The bind_vma code tries to be smart about tracking mappings.
		 * Unfortunately above, we've just wiped out the mappings
		 * without telling our object about it. So we need to fake it.
		 *
		 * Bind is not expected to fail since this is only called on
		 * resume and assumption is all requirements exist already.
		 */
		vma->bound &= ~GLOBAL_BIND;
		WARN_ON(i915_vma_bind(vma, obj->cache_level, GLOBAL_BIND));
	}

	if (INTEL_INFO(dev)->gen >= 8) {
		if (IS_CHERRYVIEW(dev))
			chv_setup_private_ppat(dev_priv);
		else
			bdw_setup_private_ppat(dev_priv);

		return;
	}

	list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
		/* TODO: Perhaps it shouldn't be gen6 specific */
		if (i915_is_ggtt(vm)) {
			if (dev_priv->mm.aliasing_ppgtt)
				gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
			continue;
		}

		gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
	}

	i915_ggtt_flush(dev_priv);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

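/*
 * gen8 GGTT PTEs are 64 bits wide: use a single writeq where the architecture
 * provides one, otherwise fall back to two 32-bit writes.
 */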
static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
{
#ifdef writeq
	writeq(pte, addr);
#else
	iowrite32((u32)pte, addr);
	iowrite32(pte >> 32, addr + 4);
#endif
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 unused)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	gen8_gtt_pte_t __iomem *gtt_entries =
		(gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr = 0; /* shut up gcc */

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_dma_address(sg_iter.sg) +
			(sg_iter.sg_pgoffset << PAGE_SHIFT);
		gen8_set_pte(&gtt_entries[i],
			     gen8_pte_encode(addr, level, true));
		i++;
	}

	/*
	 * XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readq(&gtt_entries[i-1])
			!= gen8_pte_encode(addr, level, true));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 flags)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr = 0;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0) {
		unsigned long gtt = readl(&gtt_entries[i-1]);
		WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
	}

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = gen8_pte_encode(vm->scratch.addr,
				      I915_CACHE_LLC,
				      use_scratch);
	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
	readl(gtt_base);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}

static void i915_ggtt_bind_vma(struct i915_vma *vma,
			       enum i915_cache_level cache_level,
			       u32 unused)
{
	const unsigned long entry = vma->node.start >> PAGE_SHIFT;
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	BUG_ON(!i915_is_ggtt(vma->vm));
	intel_gtt_insert_sg_entries(vma->ggtt_view.pages, entry, flags);
	vma->bound = GLOBAL_BIND;
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool unused)
{
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	intel_gtt_clear_range(first_entry, num_entries);
}

static void i915_ggtt_unbind_vma(struct i915_vma *vma)
{
	const unsigned int first = vma->node.start >> PAGE_SHIFT;
	const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;

	BUG_ON(!i915_is_ggtt(vma->vm));
	vma->bound = 0;
	intel_gtt_clear_range(first, size);
}

static void ggtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 flags)
{
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = vma->obj;

	/* Currently applicable only to VLV */
	if (obj->gt_ro)
		flags |= PTE_READ_ONLY;

	/* If there is no aliasing PPGTT, or the caller needs a global mapping,
	 * or we have a global mapping already but the cacheability flags have
	 * changed, set the global PTEs.
	 *
	 * If there is an aliasing PPGTT it is anecdotally faster, so use that
	 * instead if none of the above hold true.
	 *
	 * NB: A global mapping should only be needed for special regions like
	 * "gtt mappable", SNB errata, or if specified via special execbuf
	 * flags. At all other times, the GPU will use the aliasing PPGTT.
	 */
	if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
		if (!(vma->bound & GLOBAL_BIND) ||
		    (cache_level != obj->cache_level)) {
			vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
						vma->node.start,
						cache_level, flags);
			vma->bound |= GLOBAL_BIND;
		}
	}

	if (dev_priv->mm.aliasing_ppgtt &&
	    (!(vma->bound & LOCAL_BIND) ||
	     (cache_level != obj->cache_level))) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
		appgtt->base.insert_entries(&appgtt->base,
					    vma->ggtt_view.pages,
					    vma->node.start,
					    cache_level, flags);
		vma->bound |= LOCAL_BIND;
	}
}

static void ggtt_unbind_vma(struct i915_vma *vma)
{
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = vma->obj;

	if (vma->bound & GLOBAL_BIND) {
		vma->vm->clear_range(vma->vm,
				     vma->node.start,
				     obj->base.size,
				     true);
		vma->bound &= ~GLOBAL_BIND;
	}

	if (vma->bound & LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
		appgtt->base.clear_range(&appgtt->base,
					 vma->node.start,
					 obj->base.size,
					 true);
		vma->bound &= ~LOCAL_BIND;
	}
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

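/*
 * Keep GGTT nodes of different cache "colours" at least one guard page apart
 * by nudging the range handed to the allocator; only installed on !HAS_LLC
 * platforms (see i915_gem_setup_global_gtt()).
 */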
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}

static int i915_gem_setup_global_gtt(struct drm_device *dev,
				     unsigned long start,
				     unsigned long mappable_end,
				     unsigned long end)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;
	int ret;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);

	dev_priv->gtt.base.start = start;
	dev_priv->gtt.base.total = end - start;

	if (intel_vgpu_active(dev)) {
		ret = intel_vgt_balloon(dev);
		if (ret)
			return ret;
	}

	if (!HAS_LLC(dev))
		dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);

		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
			      i915_gem_obj_ggtt_offset(obj), obj->base.size);

		WARN_ON(i915_gem_obj_ggtt_bound(obj));
		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
			return ret;
		}
		vma->bound |= GLOBAL_BIND;
	}

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt_vm->clear_range(ggtt_vm, hole_start,
				     hole_end - hole_start, true);
	}

	/* And finally clear the reserved guard page */
	ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);

	if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt;

		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
		if (!ppgtt)
			return -ENOMEM;

		ret = __hw_ppgtt_init(dev, ppgtt);
		if (ret != 0)
			return ret;

		dev_priv->mm.aliasing_ppgtt = ppgtt;
	}

	return 0;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.base.total;
	mappable_size = dev_priv->gtt.mappable_end;

	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

void i915_global_gtt_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *vm = &dev_priv->gtt.base;

	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		ppgtt->base.cleanup(&ppgtt->base);
	}

	if (drm_mm_initialized(&vm->mm)) {
		if (intel_vgpu_active(dev))
			intel_vgt_deballoon();

		drm_mm_takedown(&vm->mm);
		list_del(&vm->global_link);
	}

	vm->cleanup(vm);
}

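/* The scratch page is a single zeroed, uncached page that unused GTT and
 * PPGTT entries are pointed at, so that stray GPU accesses through a
 * cleared range never reach arbitrary memory. */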
static int setup_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr))
		return -EINVAL;
#else
	dma_addr = page_to_phys(page);
#endif
	dev_priv->gtt.base.scratch.page = page;
	dev_priv->gtt.base.scratch.addr = dma_addr;

	return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page = dev_priv->gtt.base.scratch.page;

	set_pages_wb(page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(page);
}

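/* On gen6/7 the GGMS field gives the size of the GTT itself in MB. As a
 * worked example (illustrative only): a field value of 2 means 2MB of
 * PTEs, i.e. 512K 4-byte entries, which gen6_gmch_probe() turns into a
 * 2GB global GTT. */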
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

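/* On gen8+ the GGMS field is a power-of-two encoding: a non-zero value n
 * means (1 << n) MB of PTEs. For example n == 3 gives 8MB of 8-byte PTEs,
 * i.e. 1M entries and a 4GB global GTT; 32-bit kernels are capped below to
 * the equivalent of a 2GB GGTT. */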
static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

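/* CHV reuses the SNB field position but, like gen8, encodes the GTT size
 * as a power of two: a non-zero value n yields (1 << n) MB of PTEs, zero
 * means no GTT. */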
static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

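/* Gen6/7 stolen-memory decode: the GMS field counts 32MB units, e.g. a
 * value of 0x08 corresponds to 256MB of stolen memory. */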
static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
	return bdw_gmch_ctl << 25; /* 32 MB units */
}

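/* CHV splits the stolen-size encoding into three ranges, decoded below.
 * Worked examples: 0x10 -> 512MB (32MB units), 0x11 -> 8MB and
 * 0x16 -> 28MB (4MB units from 8MB), 0x17 -> 36MB and 0x1d -> 60MB
 * (4MB units from 36MB). */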
static size_t chv_get_stolen_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;

	/*
	 * 0x0  to 0x10: 32MB increments starting at 0MB
	 * 0x11 to 0x16: 4MB increments starting at 8MB
	 * 0x17 to 0x1d: 4MB increments starting at 36MB
	 */
	if (gmch_ctrl < 0x11)
		return gmch_ctrl << 25;
	else if (gmch_ctrl < 0x17)
		return (gmch_ctrl - 0x11 + 2) << 22;
	else
		return (gmch_ctrl - 0x17 + 9) << 22;
}

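/* Gen9 keeps the 32MB-unit encoding below 0xf0 (e.g. 0x02 -> 64MB) and
 * switches to 4MB units from 0xf0 upwards (0xf0 -> 4MB, 0xf1 -> 8MB). */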
static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
{
	gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;

	if (gen9_gmch_ctl < 0xf0)
		return gen9_gmch_ctl << 25; /* 32 MB units */
	else
		/* 4MB increments: a field value of 0xf0 maps to 4MB */
		return (gen9_gmch_ctl - 0xf0 + 1) << 22;
}

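/* Common gen6+ GGTT setup: the upper half of BAR 0 holds the GTT entries
 * (the "GSM"), which is mapped write-combined here, and the scratch page
 * used by clear_range() is allocated alongside it. */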
static int ggtt_probe_common(struct drm_device *dev,
			     size_t gtt_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_phys_addr;
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret) {
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(dev_priv->gtt.gsm);
	}

	return ret;
}

/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases. */
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	if (!USES_PPGTT(dev_priv->dev))
		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
		 * so RTL will always use the value corresponding to
		 * pat_sel = 000".
		 * So let's disable cache for GGTT to avoid screen corruptions.
		 * MOCS still can be used though.
		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
		 * before this patch, i.e. the same uncached + snooping access
		 * like on gen6/7 seems to be in effect.
		 * - So this just fixes blitter/render access. Again it looks
		 * like it's not just uncached access, but uncached + snooping.
		 * So we can still hold onto all our assumptions wrt cpu
		 * clflushing on LLC machines.
		 */
		pat = GEN8_PPAT(0, GEN8_PPAT_UC);

	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
	 * write would work. */
	I915_WRITE(GEN8_PRIVATE_PAT, pat);
	I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
}

static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */
	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	I915_WRITE(GEN8_PRIVATE_PAT, pat);
	I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
}

static int gen8_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));

	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	if (INTEL_INFO(dev)->gen >= 9) {
		*stolen = gen9_get_stolen_size(snb_gmch_ctl);
		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
	} else if (IS_CHERRYVIEW(dev)) {
		*stolen = chv_get_stolen_size(snb_gmch_ctl);
		gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
	} else {
		*stolen = gen8_get_stolen_size(snb_gmch_ctl);
		gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
	}

	*gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;

	if (IS_CHERRYVIEW(dev))
		chv_setup_private_ppat(dev_priv);
	else
		bdw_setup_private_ppat(dev_priv);

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;

	return ret;
}

static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	*stolen = gen6_get_stolen_size(snb_gmch_ctl);

	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;

	return ret;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{

	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);

	iounmap(gtt->gsm);
	teardown_scratch_page(vm->dev);
}

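/* Pre-gen6 path: the GTT lives in the GMCH and is managed by the intel-gtt
 * helper module, so probing defers to intel_gmch_probe() and reads the
 * sizes back via intel_gtt_get(). */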
static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;

	if (unlikely(dev_priv->gtt.do_idle_maps))
		DRM_INFO("applying Ironlake quirks for intel_iommu\n");

	return 0;
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		gtt->gtt_probe = i915_gmch_probe;
		gtt->base.cleanup = i915_gmch_remove;
	} else if (INTEL_INFO(dev)->gen < 8) {
		gtt->gtt_probe = gen6_gmch_probe;
		gtt->base.cleanup = gen6_gmch_remove;
		if (IS_HASWELL(dev) && dev_priv->ellc_size)
			gtt->base.pte_encode = iris_pte_encode;
		else if (IS_HASWELL(dev))
			gtt->base.pte_encode = hsw_pte_encode;
		else if (IS_VALLEYVIEW(dev))
			gtt->base.pte_encode = byt_pte_encode;
		else if (INTEL_INFO(dev)->gen >= 7)
			gtt->base.pte_encode = ivb_pte_encode;
		else
			gtt->base.pte_encode = snb_pte_encode;
	} else {
		dev_priv->gtt.gtt_probe = gen8_gmch_probe;
		dev_priv->gtt.base.cleanup = gen6_gmch_remove;
	}

	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
			     &gtt->mappable_base, &gtt->mappable_end);
	if (ret)
		return ret;

	gtt->base.dev = dev;

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 gtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		DRM_INFO("VT-d active for gfx access\n");
#endif
	/*
	 * i915.enable_ppgtt is read-only, so do an early pass to validate the
	 * user's requested state against the hardware/driver capabilities.  We
	 * do this now so that we can print out any log messages once rather
	 * than every time we check intel_enable_ppgtt().
	 */
	i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
	DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);

	return 0;
}

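/* Allocate a VMA tying an object to one address space and GGTT view, and
 * select the bind/unbind implementations: gen6+ distinguishes the global
 * GTT from per-process address spaces, while older platforms only ever
 * have the global GTT. */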
static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
					      struct i915_address_space *vm,
					      const struct i915_ggtt_view *view)
{
	struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->vma_link);
	INIT_LIST_HEAD(&vma->mm_list);
	INIT_LIST_HEAD(&vma->exec_list);
	vma->vm = vm;
	vma->obj = obj;
	vma->ggtt_view = *view;

	if (INTEL_INFO(vm->dev)->gen >= 6) {
		if (i915_is_ggtt(vm)) {
			vma->unbind_vma = ggtt_unbind_vma;
			vma->bind_vma = ggtt_bind_vma;
		} else {
			vma->unbind_vma = ppgtt_unbind_vma;
			vma->bind_vma = ppgtt_bind_vma;
		}
	} else {
		BUG_ON(!i915_is_ggtt(vm));
		vma->unbind_vma = i915_ggtt_unbind_vma;
		vma->bind_vma = i915_ggtt_bind_vma;
	}

	list_add_tail(&vma->vma_link, &obj->vma_list);
	if (!i915_is_ggtt(vm))
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));

	return vma;
}

struct i915_vma *
i915_gem_obj_lookup_or_create_vma_view(struct drm_i915_gem_object *obj,
				       struct i915_address_space *vm,
				       const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	vma = i915_gem_obj_to_vma_view(obj, vm, view);
	if (!vma)
		vma = __i915_gem_vma_create(obj, vm, view);

	return vma;
}

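/* Resolve the backing pages for a VMA's view: the normal view simply
 * reuses obj->pages, while any other view type would have to supply its
 * own page list here (none are implemented yet, hence the WARN below). */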
static inline
int i915_get_vma_pages(struct i915_vma *vma)
{
	if (vma->ggtt_view.pages)
		return 0;

	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
		vma->ggtt_view.pages = vma->obj->pages;
	else
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);

	if (!vma->ggtt_view.pages) {
		DRM_ERROR("Failed to get pages for VMA view type %u!\n",
			  vma->ggtt_view.type);
		return -EINVAL;
	}

	return 0;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in the case of non-default GGTT views) and the PTE entries are set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	int ret = i915_get_vma_pages(vma);

	if (ret)
		return ret;

	vma->bind_vma(vma, cache_level, flags);

	return 0;
}