i915_gem_gtt.c 95.3 KB
Newer Older
1 2
/*
 * Copyright © 2010 Daniel Vetter
3
 * Copyright © 2011-2014 Intel Corporation
4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

26
#include <linux/seq_file.h>
27
#include <linux/stop_machine.h>
28 29
#include <drm/drmP.h>
#include <drm/i915_drm.h>
30
#include "i915_drv.h"
31
#include "i915_vgpu.h"
32 33 34
#include "i915_trace.h"
#include "intel_drv.h"

35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70
/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exists (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view is
 * different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects were
71 72 73
 * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
 * renaming  in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95
 *
 * As a helper for callers which are only interested in the normal view,
 * globally const i915_ggtt_view_normal singleton instance exists. All old core
 * GEM API functions, the ones not taking the view parameter, are operating on,
 * or with the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of an VMA.
 *
 * Core API is designed to have copy semantics which means that passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

96 97 98 99 100 101 102
static inline struct i915_ggtt *
i915_vm_to_ggtt(struct i915_address_space *vm)
{
	GEM_BUG_ON(!i915_is_ggtt(vm));
	return container_of(vm, struct i915_ggtt, base);
}

103 104 105
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

106 107 108
const struct i915_ggtt_view i915_ggtt_view_normal = {
	.type = I915_GGTT_VIEW_NORMAL,
};
109
const struct i915_ggtt_view i915_ggtt_view_rotated = {
110
	.type = I915_GGTT_VIEW_ROTATED,
111
};
112

113 114
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
			       	int enable_ppgtt)
115
{
116 117
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
118
	bool has_full_48bit_ppgtt;
119

120 121 122 123
	has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
	has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
	has_full_48bit_ppgtt =
	       	IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
124

125
	if (intel_vgpu_active(dev_priv))
126 127
		has_full_ppgtt = false; /* emulation is too hard */

128 129 130
	if (!has_aliasing_ppgtt)
		return 0;

131 132 133 134
	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
135
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
136 137 138 139 140
		return 0;

	if (enable_ppgtt == 1)
		return 1;

141
	if (enable_ppgtt == 2 && has_full_ppgtt)
142 143
		return 2;

144 145 146
	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

147 148
#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
149
	if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
150
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
151
		return 0;
152 153 154
	}
#endif

155
	/* Early VLV doesn't have this */
156
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
157 158 159 160
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

161
	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists)
162
		return has_full_48bit_ppgtt ? 3 : 2;
163 164
	else
		return has_aliasing_ppgtt ? 1 : 0;
165 166
}

167 168 169
static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
170 171 172 173 174 175 176 177 178
{
	u32 pte_flags = 0;

	/* Currently applicable only to VLV */
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
				cache_level, pte_flags);
179 180

	return 0;
181 182 183 184 185 186 187 188 189
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm,
			     vma->node.start,
			     vma->obj->base.size,
			     true);
}
190

191 192 193
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  bool valid)
B
Ben Widawsky 已提交
194
{
195
	gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
B
Ben Widawsky 已提交
196
	pte |= addr;
197 198 199

	switch (level) {
	case I915_CACHE_NONE:
B
Ben Widawsky 已提交
200
		pte |= PPAT_UNCACHED_INDEX;
201 202 203 204 205 206 207 208 209
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

B
Ben Widawsky 已提交
210 211 212
	return pte;
}

213 214
static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
B
Ben Widawsky 已提交
215
{
216
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
B
Ben Widawsky 已提交
217 218 219 220 221 222 223 224
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

225 226 227
#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

228 229 230
static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 unused)
231
{
232
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
233
	pte |= GEN6_PTE_ADDR_ENCODE(addr);
234 235

	switch (level) {
236 237 238 239 240 241 242 243
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
244
		MISSING_CASE(level);
245 246 247 248 249
	}

	return pte;
}

250 251 252
static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 unused)
253
{
254
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
255 256 257 258 259
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
260 261 262 263 264
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
265
		pte |= GEN6_PTE_UNCACHED;
266 267
		break;
	default:
268
		MISSING_CASE(level);
269 270
	}

271 272 273
	return pte;
}

274 275 276
static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 flags)
277
{
278
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
279 280
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

281 282
	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;
283 284 285 286 287 288 289

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

290 291 292
static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 bool valid, u32 unused)
293
{
294
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
295
	pte |= HSW_PTE_ADDR_ENCODE(addr);
296 297

	if (level != I915_CACHE_NONE)
298
		pte |= HSW_WB_LLC_AGE3;
299 300 301 302

	return pte;
}

303 304 305
static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  bool valid, u32 unused)
306
{
307
	gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
308 309
	pte |= HSW_PTE_ADDR_ENCODE(addr);

310 311 312 313
	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
314
		pte |= HSW_WT_ELLC_LLC_AGE3;
315 316
		break;
	default:
317
		pte |= HSW_WB_ELLC_LLC_AGE3;
318 319
		break;
	}
320 321 322 323

	return pte;
}

324 325
static int __setup_page_dma(struct drm_device *dev,
			    struct i915_page_dma *p, gfp_t flags)
326 327 328
{
	struct device *device = &dev->pdev->dev;

329
	p->page = alloc_page(flags);
330 331
	if (!p->page)
		return -ENOMEM;
332

333 334
	p->daddr = dma_map_page(device,
				p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
335

336 337 338 339
	if (dma_mapping_error(device, p->daddr)) {
		__free_page(p->page);
		return -EINVAL;
	}
340 341

	return 0;
342 343
}

344 345 346 347 348
static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
{
	return __setup_page_dma(dev, p, GFP_KERNEL);
}

349
static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
350
{
351
	if (WARN_ON(!p->page))
352
		return;
353

354 355 356 357 358
	dma_unmap_page(&dev->pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
	__free_page(p->page);
	memset(p, 0, sizeof(*p));
}

359
static void *kmap_page_dma(struct i915_page_dma *p)
360
{
361 362
	return kmap_atomic(p->page);
}
363

364 365 366 367 368
/* We use the flushing unmap only with ppgtt structures:
 * page directories, page tables and scratch pages.
 */
static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
{
369 370 371 372 373 374 375 376 377
	/* There are only few exceptions for gen >=6. chv and bxt.
	 * And we are not sure about the latter so play safe for now.
	 */
	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
		drm_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

378
#define kmap_px(px) kmap_page_dma(px_base(px))
379 380
#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))

381 382 383 384 385
#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))

386 387 388 389 390 391 392 393 394 395 396 397
static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
			  const uint64_t val)
{
	int i;
	uint64_t * const vaddr = kmap_page_dma(p);

	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_page_dma(dev, vaddr);
}

398 399 400 401 402 403 404 405 406 407
static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
			     const uint32_t val32)
{
	uint64_t v = val32;

	v = v << 32 | val32;

	fill_page_dma(dev, p, v);
}

408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436
static struct i915_page_scratch *alloc_scratch_page(struct drm_device *dev)
{
	struct i915_page_scratch *sp;
	int ret;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (sp == NULL)
		return ERR_PTR(-ENOMEM);

	ret = __setup_page_dma(dev, px_base(sp), GFP_DMA32 | __GFP_ZERO);
	if (ret) {
		kfree(sp);
		return ERR_PTR(ret);
	}

	set_pages_uc(px_page(sp), 1);

	return sp;
}

static void free_scratch_page(struct drm_device *dev,
			      struct i915_page_scratch *sp)
{
	set_pages_wb(px_page(sp), 1);

	cleanup_px(dev, sp);
	kfree(sp);
}

437
static struct i915_page_table *alloc_pt(struct drm_device *dev)
438
{
439
	struct i915_page_table *pt;
440 441 442
	const size_t count = INTEL_INFO(dev)->gen >= 8 ?
		GEN8_PTES : GEN6_PTES;
	int ret = -ENOMEM;
443 444 445 446 447

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return ERR_PTR(-ENOMEM);

448 449 450 451 452 453
	pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
				GFP_KERNEL);

	if (!pt->used_ptes)
		goto fail_bitmap;

454
	ret = setup_px(dev, pt);
455
	if (ret)
456
		goto fail_page_m;
457 458

	return pt;
459

460
fail_page_m:
461 462 463 464 465
	kfree(pt->used_ptes);
fail_bitmap:
	kfree(pt);

	return ERR_PTR(ret);
466 467
}

468
static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
469
{
470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496
	cleanup_px(dev, pt);
	kfree(pt->used_ptes);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen8_pte_t scratch_pte;

	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
				      I915_CACHE_LLC, true);

	fill_px(vm->dev, pt, scratch_pte);
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen6_pte_t scratch_pte;

	WARN_ON(px_dma(vm->scratch_page) == 0);

	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, true, 0);

	fill32_px(vm->dev, pt, scratch_pte);
497 498
}

499
static struct i915_page_directory *alloc_pd(struct drm_device *dev)
500
{
501
	struct i915_page_directory *pd;
502
	int ret = -ENOMEM;
503 504 505 506 507

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

508 509 510
	pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
				sizeof(*pd->used_pdes), GFP_KERNEL);
	if (!pd->used_pdes)
511
		goto fail_bitmap;
512

513
	ret = setup_px(dev, pd);
514
	if (ret)
515
		goto fail_page_m;
516

517
	return pd;
518

519
fail_page_m:
520
	kfree(pd->used_pdes);
521
fail_bitmap:
522 523 524
	kfree(pd);

	return ERR_PTR(ret);
525 526
}

527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545
static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
{
	if (px_page(pd)) {
		cleanup_px(dev, pd);
		kfree(pd->used_pdes);
		kfree(pd);
	}
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	gen8_pde_t scratch_pde;

	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);

	fill_px(vm->dev, pd, scratch_pde);
}

546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576
static int __pdp_init(struct drm_device *dev,
		      struct i915_page_directory_pointer *pdp)
{
	size_t pdpes = I915_PDPES_PER_PDP(dev);

	pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
				  sizeof(unsigned long),
				  GFP_KERNEL);
	if (!pdp->used_pdpes)
		return -ENOMEM;

	pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
				      GFP_KERNEL);
	if (!pdp->page_directory) {
		kfree(pdp->used_pdpes);
		/* the PDP might be the statically allocated top level. Keep it
		 * as clean as possible */
		pdp->used_pdpes = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->used_pdpes);
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606
static struct
i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!USES_FULL_48BIT_PPGTT(dev));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(dev, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(dev, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

607 608 609 610
static void free_pdp(struct drm_device *dev,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);
611 612 613 614 615 616
	if (USES_FULL_48BIT_PPGTT(dev)) {
		cleanup_px(dev, pdp);
		kfree(pdp);
	}
}

617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637
static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm->dev, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	gen8_ppgtt_pml4e_t scratch_pml4e;

	scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
					  I915_CACHE_LLC);

	fill_px(vm->dev, pml4, scratch_pml4e);
}

638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664
static void
gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
			  struct i915_page_directory_pointer *pdp,
			  struct i915_page_directory *pd,
			  int index)
{
	gen8_ppgtt_pdpe_t *page_directorypo;

	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		return;

	page_directorypo = kmap_px(pdp);
	page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_px(ppgtt, page_directorypo);
}

static void
gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
				  struct i915_pml4 *pml4,
				  struct i915_page_directory_pointer *pdp,
				  int index)
{
	gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);

	WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
	pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_px(ppgtt, pagemap);
665 666
}

667
/* Broadwell Page Directory Pointer Descriptors */
668
static int gen8_write_pdp(struct drm_i915_gem_request *req,
669 670
			  unsigned entry,
			  dma_addr_t addr)
671
{
672
	struct intel_ringbuffer *ring = req->ring;
673
	struct intel_engine_cs *engine = req->engine;
674 675 676 677
	int ret;

	BUG_ON(entry >= 4);

678
	ret = intel_ring_begin(req, 6);
679 680 681
	if (ret)
		return ret;

682 683 684 685 686 687 688
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
	intel_ring_emit(ring, upper_32_bits(addr));
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
	intel_ring_emit(ring, lower_32_bits(addr));
	intel_ring_advance(ring);
689 690 691 692

	return 0;
}

693 694
static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
				 struct drm_i915_gem_request *req)
695
{
696
	int i, ret;
697

698
	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
699 700
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

701
		ret = gen8_write_pdp(req, i, pd_daddr);
702 703
		if (ret)
			return ret;
704
	}
B
Ben Widawsky 已提交
705

706
	return 0;
707 708
}

709 710 711 712 713 714
static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

715 716 717 718 719
static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
				       struct i915_page_directory_pointer *pdp,
				       uint64_t start,
				       uint64_t length,
				       gen8_pte_t scratch_pte)
720
{
721
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
722
	gen8_pte_t *pt_vaddr;
723 724 725
	unsigned pdpe = gen8_pdpe_index(start);
	unsigned pde = gen8_pde_index(start);
	unsigned pte = gen8_pte_index(start);
726
	unsigned num_entries = length >> PAGE_SHIFT;
727 728
	unsigned last_pte, i;

729 730
	if (WARN_ON(!pdp))
		return;
731 732

	while (num_entries) {
733 734
		struct i915_page_directory *pd;
		struct i915_page_table *pt;
735

736
		if (WARN_ON(!pdp->page_directory[pdpe]))
737
			break;
738

739
		pd = pdp->page_directory[pdpe];
740 741

		if (WARN_ON(!pd->page_table[pde]))
742
			break;
743 744 745

		pt = pd->page_table[pde];

746
		if (WARN_ON(!px_page(pt)))
747
			break;
748

749
		last_pte = pte + num_entries;
750 751
		if (last_pte > GEN8_PTES)
			last_pte = GEN8_PTES;
752

753
		pt_vaddr = kmap_px(pt);
754

755
		for (i = pte; i < last_pte; i++) {
756
			pt_vaddr[i] = scratch_pte;
757 758
			num_entries--;
		}
759

760
		kunmap_px(ppgtt, pt_vaddr);
761

762
		pte = 0;
763
		if (++pde == I915_PDES) {
764 765
			if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
				break;
766 767
			pde = 0;
		}
768 769 770
	}
}

771 772 773 774
static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length,
				   bool use_scratch)
775
{
776
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
777 778 779
	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
						 I915_CACHE_LLC, use_scratch);

780 781 782 783
	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
		gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
					   scratch_pte);
	} else {
784
		uint64_t pml4e;
785 786
		struct i915_page_directory_pointer *pdp;

787
		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
788 789 790 791
			gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
						   scratch_pte);
		}
	}
792 793 794 795 796
}

static void
gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
			      struct i915_page_directory_pointer *pdp,
797
			      struct sg_page_iter *sg_iter,
798 799 800
			      uint64_t start,
			      enum i915_cache_level cache_level)
{
801
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
802
	gen8_pte_t *pt_vaddr;
803 804 805
	unsigned pdpe = gen8_pdpe_index(start);
	unsigned pde = gen8_pde_index(start);
	unsigned pte = gen8_pte_index(start);
806

807
	pt_vaddr = NULL;
808

809
	while (__sg_page_iter_next(sg_iter)) {
B
Ben Widawsky 已提交
810
		if (pt_vaddr == NULL) {
811
			struct i915_page_directory *pd = pdp->page_directory[pdpe];
812
			struct i915_page_table *pt = pd->page_table[pde];
813
			pt_vaddr = kmap_px(pt);
B
Ben Widawsky 已提交
814
		}
815

816
		pt_vaddr[pte] =
817
			gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
818
					cache_level, true);
819
		if (++pte == GEN8_PTES) {
820
			kunmap_px(ppgtt, pt_vaddr);
821
			pt_vaddr = NULL;
822
			if (++pde == I915_PDES) {
823 824
				if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
					break;
825 826 827
				pde = 0;
			}
			pte = 0;
828 829
		}
	}
830 831 832

	if (pt_vaddr)
		kunmap_px(ppgtt, pt_vaddr);
833 834
}

835 836 837 838 839 840
static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level,
				      u32 unused)
{
841
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
842
	struct sg_page_iter sg_iter;
843

844
	__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
845 846 847 848 849 850

	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
		gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
					      cache_level);
	} else {
		struct i915_page_directory_pointer *pdp;
851
		uint64_t pml4e;
852 853
		uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;

854
		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
855 856 857 858
			gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
						      start, cache_level);
		}
	}
859 860
}

861 862
static void gen8_free_page_tables(struct drm_device *dev,
				  struct i915_page_directory *pd)
863 864 865
{
	int i;

866
	if (!px_page(pd))
867 868
		return;

869
	for_each_set_bit(i, pd->used_pdes, I915_PDES) {
870 871
		if (WARN_ON(!pd->page_table[i]))
			continue;
872

873
		free_pt(dev, pd->page_table[i]);
874 875
		pd->page_table[i] = NULL;
	}
B
Ben Widawsky 已提交
876 877
}

878 879 880
static int gen8_init_scratch(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev;
881
	int ret;
882 883 884 885 886 887 888

	vm->scratch_page = alloc_scratch_page(dev);
	if (IS_ERR(vm->scratch_page))
		return PTR_ERR(vm->scratch_page);

	vm->scratch_pt = alloc_pt(dev);
	if (IS_ERR(vm->scratch_pt)) {
889 890
		ret = PTR_ERR(vm->scratch_pt);
		goto free_scratch_page;
891 892 893 894
	}

	vm->scratch_pd = alloc_pd(dev);
	if (IS_ERR(vm->scratch_pd)) {
895 896
		ret = PTR_ERR(vm->scratch_pd);
		goto free_pt;
897 898
	}

899 900 901
	if (USES_FULL_48BIT_PPGTT(dev)) {
		vm->scratch_pdp = alloc_pdp(dev);
		if (IS_ERR(vm->scratch_pdp)) {
902 903
			ret = PTR_ERR(vm->scratch_pdp);
			goto free_pd;
904 905 906
		}
	}

907 908
	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
909 910
	if (USES_FULL_48BIT_PPGTT(dev))
		gen8_initialize_pdp(vm, vm->scratch_pdp);
911 912

	return 0;
913 914 915 916 917 918 919 920 921

free_pd:
	free_pd(dev, vm->scratch_pd);
free_pt:
	free_pt(dev, vm->scratch_pt);
free_scratch_page:
	free_scratch_page(dev, vm->scratch_page);

	return ret;
922 923
}

924 925 926
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	enum vgt_g2v_type msg;
927
	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
928 929
	int i;

930
	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
931 932
		u64 daddr = px_dma(&ppgtt->pml4);

933 934
		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
935 936 937 938 939 940 941

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
			u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

942 943
			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
944 945 946 947 948 949 950 951 952 953 954
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

955 956 957 958
static void gen8_free_scratch(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev;

959 960
	if (USES_FULL_48BIT_PPGTT(dev))
		free_pdp(dev, vm->scratch_pdp);
961 962 963 964 965
	free_pd(dev, vm->scratch_pd);
	free_pt(dev, vm->scratch_pt);
	free_scratch_page(dev, vm->scratch_page);
}

966 967
static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
				    struct i915_page_directory_pointer *pdp)
968 969 970
{
	int i;

971 972
	for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
		if (WARN_ON(!pdp->page_directory[i]))
973 974
			continue;

975 976
		gen8_free_page_tables(dev, pdp->page_directory[i]);
		free_pd(dev, pdp->page_directory[i]);
977
	}
978

979
	free_pdp(dev, pdp);
980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
		if (WARN_ON(!ppgtt->pml4.pdps[i]))
			continue;

		gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
	}

	cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
998
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
999

1000
	if (intel_vgpu_active(to_i915(vm->dev)))
1001 1002
		gen8_ppgtt_notify_vgt(ppgtt, false);

1003 1004 1005 1006
	if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
		gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
	else
		gen8_ppgtt_cleanup_4lvl(ppgtt);
1007

1008
	gen8_free_scratch(vm);
1009 1010
}

1011 1012
/**
 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
1013 1014
 * @vm:	Master vm structure.
 * @pd:	Page directory for this address range.
1015
 * @start:	Starting virtual address to begin allocations.
1016
 * @length:	Size of the allocations.
1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028
 * @new_pts:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page tables. Extremely similar to
 * gen8_ppgtt_alloc_page_directories(). The main difference is here we are limited by
 * the page directory boundary (instead of the page directory pointer). That
 * boundary is 1GB virtual. Therefore, unlike gen8_ppgtt_alloc_page_directories(), it is
 * possible, and likely that the caller will need to use multiple calls of this
 * function to achieve the appropriate allocation.
 *
 * Return: 0 if success; negative error code otherwise.
 */
1029
static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
1030
				     struct i915_page_directory *pd,
1031
				     uint64_t start,
1032 1033
				     uint64_t length,
				     unsigned long *new_pts)
1034
{
1035
	struct drm_device *dev = vm->dev;
1036
	struct i915_page_table *pt;
1037
	uint32_t pde;
1038

1039
	gen8_for_each_pde(pt, pd, start, length, pde) {
1040
		/* Don't reallocate page tables */
1041
		if (test_bit(pde, pd->used_pdes)) {
1042
			/* Scratch is never allocated this way */
1043
			WARN_ON(pt == vm->scratch_pt);
1044 1045 1046
			continue;
		}

1047
		pt = alloc_pt(dev);
1048
		if (IS_ERR(pt))
1049 1050
			goto unwind_out;

1051
		gen8_initialize_pt(vm, pt);
1052
		pd->page_table[pde] = pt;
1053
		__set_bit(pde, new_pts);
1054
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
1055 1056
	}

1057
	return 0;
1058 1059

unwind_out:
1060
	for_each_set_bit(pde, new_pts, I915_PDES)
1061
		free_pt(dev, pd->page_table[pde]);
1062

B
Ben Widawsky 已提交
1063
	return -ENOMEM;
1064 1065
}

1066 1067
/**
 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
1068
 * @vm:	Master vm structure.
1069 1070
 * @pdp:	Page directory pointer for this address range.
 * @start:	Starting virtual address to begin allocations.
1071 1072
 * @length:	Size of the allocations.
 * @new_pds:	Bitmap set by function with new allocations. Likely used by the
1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088
 *		caller to free on error.
 *
 * Allocate the required number of page directories starting at the pde index of
 * @start, and ending at the pde index @start + @length. This function will skip
 * over already allocated page directories within the range, and only allocate
 * new ones, setting the appropriate pointer within the pdp as well as the
 * correct position in the bitmap @new_pds.
 *
 * The function will only allocate the pages within the range for a give page
 * directory pointer. In other words, if @start + @length straddles a virtually
 * addressed PDP boundary (512GB for 4k pages), there will be more allocations
 * required by the caller, This is not currently possible, and the BUG in the
 * code will prevent it.
 *
 * Return: 0 if success; negative error code otherwise.
 */
1089 1090 1091 1092 1093 1094
static int
gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
				  struct i915_page_directory_pointer *pdp,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pds)
1095
{
1096
	struct drm_device *dev = vm->dev;
1097
	struct i915_page_directory *pd;
1098
	uint32_t pdpe;
1099
	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1100

1101
	WARN_ON(!bitmap_empty(new_pds, pdpes));
1102

1103
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1104
		if (test_bit(pdpe, pdp->used_pdpes))
1105
			continue;
1106

1107
		pd = alloc_pd(dev);
1108
		if (IS_ERR(pd))
B
Ben Widawsky 已提交
1109
			goto unwind_out;
1110

1111
		gen8_initialize_pd(vm, pd);
1112
		pdp->page_directory[pdpe] = pd;
1113
		__set_bit(pdpe, new_pds);
1114
		trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
B
Ben Widawsky 已提交
1115 1116
	}

1117
	return 0;
B
Ben Widawsky 已提交
1118 1119

unwind_out:
1120
	for_each_set_bit(pdpe, new_pds, pdpes)
1121
		free_pd(dev, pdp->page_directory[pdpe]);
B
Ben Widawsky 已提交
1122 1123

	return -ENOMEM;
1124 1125
}

1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154
/**
 * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
 * @vm:	Master vm structure.
 * @pml4:	Page map level 4 for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pdps:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page directory pointers. Extremely similar to
 * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
 * The main difference is here we are limited by the pml4 boundary (instead of
 * the page directory pointer).
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int
gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
				  struct i915_pml4 *pml4,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pdps)
{
	struct drm_device *dev = vm->dev;
	struct i915_page_directory_pointer *pdp;
	uint32_t pml4e;

	WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));

1155
	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1156 1157 1158 1159 1160
		if (!test_bit(pml4e, pml4->used_pml4es)) {
			pdp = alloc_pdp(dev);
			if (IS_ERR(pdp))
				goto unwind_out;

1161
			gen8_initialize_pdp(vm, pdp);
1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179
			pml4->pdps[pml4e] = pdp;
			__set_bit(pml4e, new_pdps);
			trace_i915_page_directory_pointer_entry_alloc(vm,
								      pml4e,
								      start,
								      GEN8_PML4E_SHIFT);
		}
	}

	return 0;

unwind_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
		free_pdp(dev, pml4->pdps[pml4e]);

	return -ENOMEM;
}

1180
static void
1181
free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
1182 1183 1184 1185 1186 1187 1188 1189 1190 1191
{
	kfree(new_pts);
	kfree(new_pds);
}

/* Fills in the page directory bitmap, and the array of page tables bitmap. Both
 * of these are based on the number of PDPEs in the system.
 */
static
int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
1192
					 unsigned long **new_pts,
1193
					 uint32_t pdpes)
1194 1195
{
	unsigned long *pds;
1196
	unsigned long *pts;
1197

1198
	pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
1199 1200 1201
	if (!pds)
		return -ENOMEM;

1202 1203 1204 1205
	pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
		      GFP_TEMPORARY);
	if (!pts)
		goto err_out;
1206 1207 1208 1209 1210 1211 1212

	*new_pds = pds;
	*new_pts = pts;

	return 0;

err_out:
1213
	free_gen8_temp_bitmaps(pds, pts);
1214 1215 1216
	return -ENOMEM;
}

1217 1218 1219 1220 1221 1222 1223 1224 1225 1226
/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
}

1227 1228 1229 1230
static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp,
				    uint64_t start,
				    uint64_t length)
1231
{
1232
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1233
	unsigned long *new_page_dirs, *new_page_tables;
1234
	struct drm_device *dev = vm->dev;
1235
	struct i915_page_directory *pd;
1236 1237
	const uint64_t orig_start = start;
	const uint64_t orig_length = length;
1238
	uint32_t pdpe;
1239
	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1240 1241
	int ret;

1242 1243 1244 1245
	/* Wrap is never okay since we can only represent 48b, and we don't
	 * actually use the other side of the canonical address space.
	 */
	if (WARN_ON(start + length < start))
1246 1247
		return -ENODEV;

1248
	if (WARN_ON(start + length > vm->total))
1249
		return -ENODEV;
1250

1251
	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
1252 1253 1254
	if (ret)
		return ret;

1255
	/* Do the allocations first so we can easily bail out */
1256 1257
	ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
						new_page_dirs);
1258
	if (ret) {
1259
		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1260 1261 1262 1263
		return ret;
	}

	/* For every page directory referenced, allocate page tables */
1264
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1265
		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
1266
						new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
1267 1268 1269 1270
		if (ret)
			goto err_out;
	}

1271 1272 1273
	start = orig_start;
	length = orig_length;

1274 1275
	/* Allocations have completed successfully, so set the bitmaps, and do
	 * the mappings. */
1276
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1277
		gen8_pde_t *const page_directory = kmap_px(pd);
1278
		struct i915_page_table *pt;
1279
		uint64_t pd_len = length;
1280 1281 1282
		uint64_t pd_start = start;
		uint32_t pde;

1283 1284 1285
		/* Every pd should be allocated, we just did that above. */
		WARN_ON(!pd);

1286
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297
			/* Same reasoning as pd */
			WARN_ON(!pt);
			WARN_ON(!pd_len);
			WARN_ON(!gen8_pte_count(pd_start, pd_len));

			/* Set our used ptes within the page table */
			bitmap_set(pt->used_ptes,
				   gen8_pte_index(pd_start),
				   gen8_pte_count(pd_start, pd_len));

			/* Our pde is now pointing to the pagetable, pt */
1298
			__set_bit(pde, pd->used_pdes);
1299 1300

			/* Map the PDE to the page table */
1301 1302
			page_directory[pde] = gen8_pde_encode(px_dma(pt),
							      I915_CACHE_LLC);
1303 1304 1305 1306
			trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
							gen8_pte_index(start),
							gen8_pte_count(start, length),
							GEN8_PTES);
1307 1308 1309

			/* NB: We haven't yet mapped ptes to pages. At this
			 * point we're still relying on insert_entries() */
1310
		}
1311

1312
		kunmap_px(ppgtt, page_directory);
1313
		__set_bit(pdpe, pdp->used_pdpes);
1314
		gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
1315 1316
	}

1317
	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1318
	mark_tlbs_dirty(ppgtt);
B
Ben Widawsky 已提交
1319
	return 0;
1320

B
Ben Widawsky 已提交
1321
err_out:
1322
	while (pdpe--) {
1323 1324
		unsigned long temp;

1325 1326
		for_each_set_bit(temp, new_page_tables + pdpe *
				BITS_TO_LONGS(I915_PDES), I915_PDES)
1327
			free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
1328 1329
	}

1330
	for_each_set_bit(pdpe, new_page_dirs, pdpes)
1331
		free_pd(dev, pdp->page_directory[pdpe]);
1332

1333
	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1334
	mark_tlbs_dirty(ppgtt);
1335 1336 1337
	return ret;
}

1338 1339 1340 1341 1342 1343
static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
				    struct i915_pml4 *pml4,
				    uint64_t start,
				    uint64_t length)
{
	DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
1344
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1345
	struct i915_page_directory_pointer *pdp;
1346
	uint64_t pml4e;
1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364
	int ret = 0;

	/* Do the pml4 allocations first, so we don't need to track the newly
	 * allocated tables below the pdp */
	bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);

	/* The pagedirectory and pagetable allocations are done in the shared 3
	 * and 4 level code. Just allocate the pdps.
	 */
	ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
						new_pdps);
	if (ret)
		return ret;

	WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
	     "The allocation has spanned more than 512GB. "
	     "It is highly likely this is incorrect.");

1365
	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389
		WARN_ON(!pdp);

		ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
		if (ret)
			goto err_out;

		gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
	}

	bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
		  GEN8_PML4ES_PER_PML4);

	return 0;

err_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
		gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);

	return ret;
}

static int gen8_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start, uint64_t length)
{
1390
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1391 1392 1393 1394 1395 1396 1397

	if (USES_FULL_48BIT_PPGTT(vm->dev))
		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
	else
		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
}

1398 1399 1400 1401 1402 1403 1404 1405
static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
			  uint64_t start, uint64_t length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_page_directory *pd;
	uint32_t pdpe;

1406
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1407 1408 1409 1410 1411 1412 1413 1414 1415
		struct i915_page_table *pt;
		uint64_t pd_len = length;
		uint64_t pd_start = start;
		uint32_t pde;

		if (!test_bit(pdpe, pdp->used_pdpes))
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
1416
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465
			uint32_t  pte;
			gen8_pte_t *pt_vaddr;

			if (!test_bit(pde, pd->used_pdes))
				continue;

			pt_vaddr = kmap_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				uint64_t va =
					(pdpe << GEN8_PDPE_SHIFT) |
					(pde << GEN8_PDE_SHIFT) |
					(pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, "  SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			/* don't use kunmap_px, it could trigger
			 * an unnecessary flush.
			 */
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	uint64_t start = ppgtt->base.start;
	uint64_t length = ppgtt->base.total;
	gen8_pte_t scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
						 I915_CACHE_LLC, true);

	if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
	} else {
1466
		uint64_t pml4e;
1467 1468 1469
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

1470
		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
1471 1472 1473 1474 1475 1476 1477 1478 1479
			if (!test_bit(pml4e, pml4->used_pml4es))
				continue;

			seq_printf(m, "    PML4E #%llu\n", pml4e);
			gen8_dump_pdp(pdp, start, length, scratch_pte, m);
		}
	}
}

1480 1481
static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
1482
	unsigned long *new_page_dirs, *new_page_tables;
1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501
	uint32_t pdpes = I915_PDPES_PER_PDP(dev);
	int ret;

	/* We allocate temp bitmap for page tables for no gain
	 * but as this is for init only, lets keep the things simple
	 */
	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Allocate for all pdps regardless of how the ppgtt
	 * was defined.
	 */
	ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
						0, 1ULL << 32,
						new_page_dirs);
	if (!ret)
		*ppgtt->pdp.used_pdpes = *new_page_dirs;

1502
	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
1503 1504 1505 1506

	return ret;
}

1507
/*
1508 1509 1510 1511
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
B
Ben Widawsky 已提交
1512
 *
1513
 */
1514
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
B
Ben Widawsky 已提交
1515
{
1516
	int ret;
1517

1518 1519 1520
	ret = gen8_init_scratch(&ppgtt->base);
	if (ret)
		return ret;
1521

1522 1523
	ppgtt->base.start = 0;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1524
	ppgtt->base.allocate_va_range = gen8_alloc_va_range;
1525
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
1526
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
1527 1528
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
1529
	ppgtt->debug_dump = gen8_dump_ppgtt;
1530

1531 1532 1533 1534
	if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
		ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
		if (ret)
			goto free_scratch;
1535

1536 1537
		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);

1538
		ppgtt->base.total = 1ULL << 48;
1539
		ppgtt->switch_mm = gen8_48b_mm_switch;
1540
	} else {
1541
		ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
1542 1543 1544 1545
		if (ret)
			goto free_scratch;

		ppgtt->base.total = 1ULL << 32;
1546
		ppgtt->switch_mm = gen8_legacy_mm_switch;
1547 1548 1549
		trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
							      0, 0,
							      GEN8_PML4E_SHIFT);
1550

1551
		if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
1552 1553 1554 1555
			ret = gen8_preallocate_top_level_pdps(ppgtt);
			if (ret)
				goto free_scratch;
		}
1556
	}
1557

1558
	if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
1559 1560
		gen8_ppgtt_notify_vgt(ppgtt, true);

1561
	return 0;
1562 1563 1564 1565

free_scratch:
	gen8_free_scratch(&ppgtt->base);
	return ret;
1566 1567
}

B
Ben Widawsky 已提交
1568 1569 1570
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
1571
	struct i915_page_table *unused;
1572
	gen6_pte_t scratch_pte;
B
Ben Widawsky 已提交
1573
	uint32_t pd_entry;
1574
	uint32_t  pte, pde;
1575
	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
B
Ben Widawsky 已提交
1576

1577 1578
	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, true, 0);
B
Ben Widawsky 已提交
1579

1580
	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
B
Ben Widawsky 已提交
1581
		u32 expected;
1582
		gen6_pte_t *pt_vaddr;
1583
		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
1584
		pd_entry = readl(ppgtt->pd_addr + pde);
B
Ben Widawsky 已提交
1585 1586 1587 1588 1589 1590 1591 1592 1593
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

1594 1595
		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);

1596
		for (pte = 0; pte < GEN6_PTES; pte+=4) {
B
Ben Widawsky 已提交
1597
			unsigned long va =
1598
				(pde * PAGE_SIZE * GEN6_PTES) +
B
Ben Widawsky 已提交
1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, "  SCRATCH ");
			}
			seq_puts(m, "\n");
		}
1617
		kunmap_px(ppgtt, pt_vaddr);
B
Ben Widawsky 已提交
1618 1619 1620
	}
}

1621
/* Write pde (index) from the page directory @pd to the page table @pt */
1622 1623
static void gen6_write_pde(struct i915_page_directory *pd,
			    const int pde, struct i915_page_table *pt)
B
Ben Widawsky 已提交
1624
{
1625 1626 1627 1628
	/* Caller needs to make sure the write completes if necessary */
	struct i915_hw_ppgtt *ppgtt =
		container_of(pd, struct i915_hw_ppgtt, pd);
	u32 pd_entry;
B
Ben Widawsky 已提交
1629

1630
	pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
1631
	pd_entry |= GEN6_PDE_VALID;
B
Ben Widawsky 已提交
1632

1633 1634
	writel(pd_entry, ppgtt->pd_addr + pde);
}
B
Ben Widawsky 已提交
1635

1636 1637 1638
/* Write all the page tables found in the ppgtt structure to incrementing page
 * directories. */
static void gen6_write_page_range(struct drm_i915_private *dev_priv,
1639
				  struct i915_page_directory *pd,
1640 1641
				  uint32_t start, uint32_t length)
{
1642
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
1643
	struct i915_page_table *pt;
1644
	uint32_t pde;
1645

1646
	gen6_for_each_pde(pt, pd, start, length, pde)
1647 1648 1649 1650
		gen6_write_pde(pd, pde, pt);

	/* Make sure write is complete before other code can use this page
	 * table. Also require for WC mapped PTEs */
1651
	readl(ggtt->gsm);
B
Ben Widawsky 已提交
1652 1653
}

1654
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
B
Ben Widawsky 已提交
1655
{
1656
	BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
1657

1658
	return (ppgtt->pd.base.ggtt_offset / 64) << 16;
1659 1660
}

1661
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
1662
			 struct drm_i915_gem_request *req)
1663
{
1664
	struct intel_ringbuffer *ring = req->ring;
1665
	struct intel_engine_cs *engine = req->engine;
1666 1667 1668
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
1669
	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
1670 1671 1672
	if (ret)
		return ret;

1673
	ret = intel_ring_begin(req, 6);
1674 1675 1676
	if (ret)
		return ret;

1677 1678 1679 1680 1681 1682 1683
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
1684 1685 1686 1687

	return 0;
}

1688
static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
1689
			  struct drm_i915_gem_request *req)
1690
{
1691
	struct intel_ringbuffer *ring = req->ring;
1692
	struct intel_engine_cs *engine = req->engine;
1693 1694 1695
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
1696
	ret = engine->flush(req, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
1697 1698 1699
	if (ret)
		return ret;

1700
	ret = intel_ring_begin(req, 6);
1701 1702 1703
	if (ret)
		return ret;

1704 1705 1706 1707 1708 1709 1710
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
1711

1712
	/* XXX: RCS is the only one to auto invalidate the TLBs? */
1713
	if (engine->id != RCS) {
1714 1715
		ret = engine->flush(req,
				    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
1716 1717 1718 1719
		if (ret)
			return ret;
	}

1720 1721 1722
	return 0;
}

1723
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
1724
			  struct drm_i915_gem_request *req)
1725
{
1726
	struct intel_engine_cs *engine = req->engine;
1727
	struct drm_i915_private *dev_priv = req->i915;
1728

1729 1730
	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
1731 1732 1733
	return 0;
}

1734
static void gen8_ppgtt_enable(struct drm_device *dev)
1735
{
1736
	struct drm_i915_private *dev_priv = to_i915(dev);
1737
	struct intel_engine_cs *engine;
B
Ben Widawsky 已提交
1738

1739
	for_each_engine(engine, dev_priv) {
1740
		u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
1741
		I915_WRITE(RING_MODE_GEN7(engine),
1742
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
1743 1744
	}
}
B
Ben Widawsky 已提交
1745

1746
static void gen7_ppgtt_enable(struct drm_device *dev)
B
Ben Widawsky 已提交
1747
{
1748
	struct drm_i915_private *dev_priv = to_i915(dev);
1749
	struct intel_engine_cs *engine;
1750
	uint32_t ecochk, ecobits;
B
Ben Widawsky 已提交
1751

1752 1753
	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
1754

1755 1756 1757 1758 1759 1760 1761 1762
	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);
1763

1764
	for_each_engine(engine, dev_priv) {
B
Ben Widawsky 已提交
1765
		/* GFX_MODE is per-ring on gen7+ */
1766
		I915_WRITE(RING_MODE_GEN7(engine),
1767
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
B
Ben Widawsky 已提交
1768
	}
1769
}
B
Ben Widawsky 已提交
1770

1771
static void gen6_ppgtt_enable(struct drm_device *dev)
1772
{
1773
	struct drm_i915_private *dev_priv = to_i915(dev);
1774
	uint32_t ecochk, gab_ctl, ecobits;
1775

1776 1777 1778
	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);
B
Ben Widawsky 已提交
1779

1780 1781 1782 1783 1784 1785 1786
	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
B
Ben Widawsky 已提交
1787 1788
}

1789
/* PPGTT support for Sandybdrige/Gen6 and later */
1790
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
1791 1792
				   uint64_t start,
				   uint64_t length,
1793
				   bool use_scratch)
1794
{
1795
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1796
	gen6_pte_t *pt_vaddr, scratch_pte;
1797 1798
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
1799 1800
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned first_pte = first_entry % GEN6_PTES;
1801
	unsigned last_pte, i;
1802

1803 1804
	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, true, 0);
1805

1806 1807
	while (num_entries) {
		last_pte = first_pte + num_entries;
1808 1809
		if (last_pte > GEN6_PTES)
			last_pte = GEN6_PTES;
1810

1811
		pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1812

1813 1814
		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;
1815

1816
		kunmap_px(ppgtt, pt_vaddr);
1817

1818 1819
		num_entries -= last_pte - first_pte;
		first_pte = 0;
1820
		act_pt++;
1821
	}
1822 1823
}

1824
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
D
Daniel Vetter 已提交
1825
				      struct sg_table *pages,
1826
				      uint64_t start,
1827
				      enum i915_cache_level cache_level, u32 flags)
D
Daniel Vetter 已提交
1828
{
1829
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1830
	unsigned first_entry = start >> PAGE_SHIFT;
1831 1832
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned act_pte = first_entry % GEN6_PTES;
1833 1834 1835
	gen6_pte_t *pt_vaddr = NULL;
	struct sgt_iter sgt_iter;
	dma_addr_t addr;
1836

1837
	for_each_sgt_dma(addr, sgt_iter, pages) {
1838
		if (pt_vaddr == NULL)
1839
			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1840

1841
		pt_vaddr[act_pte] =
1842
			vm->pte_encode(addr, cache_level, true, flags);
1843

1844
		if (++act_pte == GEN6_PTES) {
1845
			kunmap_px(ppgtt, pt_vaddr);
1846
			pt_vaddr = NULL;
1847
			act_pt++;
1848
			act_pte = 0;
D
Daniel Vetter 已提交
1849 1850
		}
	}
1851

1852
	if (pt_vaddr)
1853
		kunmap_px(ppgtt, pt_vaddr);
D
Daniel Vetter 已提交
1854 1855
}

static int gen6_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start_in, uint64_t length_in)
{
	DECLARE_BITMAP(new_page_tables, I915_PDES);
	struct drm_device *dev = vm->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_table *pt;
	uint32_t start, length, start_save, length_save;
	uint32_t pde;
	int ret;

	if (WARN_ON(start_in + length_in > ppgtt->base.total))
		return -ENODEV;

	start = start_save = start_in;
	length = length_save = length_in;

	bitmap_zero(new_page_tables, I915_PDES);

	/* The allocation is done in two stages so that we can bail out with
	 * minimal amount of pain. The first stage finds new page tables that
	 * need allocation. The second stage marks the PTEs in use within the
	 * page tables.
	 */
	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
		if (pt != vm->scratch_pt) {
			WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
			continue;
		}

		/* We've already allocated a page table */
		WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));

		pt = alloc_pt(dev);
		if (IS_ERR(pt)) {
			ret = PTR_ERR(pt);
			goto unwind_out;
		}

		gen6_initialize_pt(vm, pt);

		ppgtt->pd.page_table[pde] = pt;
		__set_bit(pde, new_page_tables);
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
	}

	start = start_save;
	length = length_save;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
		DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);

		bitmap_zero(tmp_bitmap, GEN6_PTES);
		bitmap_set(tmp_bitmap, gen6_pte_index(start),
			   gen6_pte_count(start, length));

		if (__test_and_clear_bit(pde, new_page_tables))
			gen6_write_pde(&ppgtt->pd, pde, pt);

		trace_i915_page_table_entry_map(vm, pde, pt,
					 gen6_pte_index(start),
					 gen6_pte_count(start, length),
					 GEN6_PTES);
		bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
				GEN6_PTES);
	}

	WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));

	/* Make sure write is complete before other code can use this page
	 * table. Also required for WC mapped PTEs */
	readl(ggtt->gsm);

	mark_tlbs_dirty(ppgtt);
	return 0;

unwind_out:
	for_each_set_bit(pde, new_page_tables, I915_PDES) {
		struct i915_page_table *pt = ppgtt->pd.page_table[pde];

		ppgtt->pd.page_table[pde] = vm->scratch_pt;
		free_pt(vm->dev, pt);
	}

	mark_tlbs_dirty(ppgtt);
	return ret;
}

static int gen6_init_scratch(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev;

	vm->scratch_page = alloc_scratch_page(dev);
	if (IS_ERR(vm->scratch_page))
		return PTR_ERR(vm->scratch_page);

	vm->scratch_pt = alloc_pt(dev);
	if (IS_ERR(vm->scratch_pt)) {
		free_scratch_page(dev, vm->scratch_page);
		return PTR_ERR(vm->scratch_pt);
	}

	gen6_initialize_pt(vm, vm->scratch_pt);

	return 0;
}

static void gen6_free_scratch(struct i915_address_space *vm)
{
	struct drm_device *dev = vm->dev;

	free_pt(dev, vm->scratch_pt);
	free_scratch_page(dev, vm->scratch_page);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory *pd = &ppgtt->pd;
	struct drm_device *dev = vm->dev;
	struct i915_page_table *pt;
	uint32_t pde;

	drm_mm_remove_node(&ppgtt->node);

	gen6_for_all_pdes(pt, pd, pde)
		if (pt != vm->scratch_pt)
			free_pt(dev, pt);

	gen6_free_scratch(vm);
}
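
/*
 * Note on the layout used below: the gen6/7 page directory is not a separate
 * page of memory.  Its 512 PDEs are written into the GGTT entry slots that
 * back a reserved GGTT range (see ppgtt->pd_addr in gen6_ppgtt_init() and the
 * gen6_write_pde() call above), so the reservation made in
 * gen6_ppgtt_allocate_page_directories() is GEN6_PD_SIZE = 512 pages, i.e.
 * 2 MiB of GGTT address space assuming 4 KiB pages, taken from the top of
 * the GGTT.
 */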

static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool retried = false;
	int ret;

	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	BUG_ON(!drm_mm_initialized(&ggtt->base.mm));

	ret = gen6_init_scratch(vm);
	if (ret)
		return ret;

alloc:
	ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
						  &ppgtt->node, GEN6_PD_SIZE,
						  GEN6_PD_ALIGN, 0,
						  0, ggtt->base.total,
						  DRM_MM_TOPDOWN);
	if (ret == -ENOSPC && !retried) {
		ret = i915_gem_evict_something(dev, &ggtt->base,
					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
					       I915_CACHE_NONE,
					       0, ggtt->base.total,
					       0);
		if (ret)
			goto err_out;

		retried = true;
		goto alloc;
	}

	if (ret)
		goto err_out;

	if (ppgtt->node.start < ggtt->mappable_end)
		DRM_DEBUG("Forced to use aperture for PDEs\n");

	return 0;

err_out:
	gen6_free_scratch(vm);
	return ret;
}

static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
	return gen6_ppgtt_allocate_page_directories(ppgtt);
}

static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
				  uint64_t start, uint64_t length)
{
	struct i915_page_table *unused;
	uint32_t pde;

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
}
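
/*
 * gen6_ppgtt_init() below sizes the address space as
 * I915_PDES * GEN6_PTES * PAGE_SIZE.  With the usual 4 KiB pages and 4-byte
 * gen6 PTEs that works out to 512 * 1024 * 4 KiB = 2 GiB per PPGTT; treat the
 * exact figure as an illustration, since it follows from those assumed
 * constants.
 */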

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	ppgtt->base.pte_encode = ggtt->base.pte_encode;
	if (intel_vgpu_active(dev_priv) || IS_GEN6(dev))
		ppgtt->switch_mm = gen6_mm_switch;
	else if (IS_HASWELL(dev))
		ppgtt->switch_mm = hsw_mm_switch;
	else if (IS_GEN7(dev))
		ppgtt->switch_mm = gen7_mm_switch;
	else
		BUG();

	ret = gen6_ppgtt_alloc(ppgtt);
	if (ret)
		return ret;

	ppgtt->base.allocate_va_range = gen6_alloc_va_range;
	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
	ppgtt->debug_dump = gen6_dump_ppgtt;

	ppgtt->pd.base.ggtt_offset =
		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);

	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);

	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);

	gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);

	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
			 ppgtt->node.size >> 20,
			 ppgtt->node.start / PAGE_SIZE);

	DRM_DEBUG("Adding PPGTT at offset %x\n",
		  ppgtt->pd.base.ggtt_offset << 10);

	return 0;
}

static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->base.dev = dev;

	if (INTEL_INFO(dev)->gen < 8)
		return gen6_ppgtt_init(ppgtt);
	else
		return gen8_ppgtt_init(ppgtt);
}

static void i915_address_space_init(struct i915_address_space *vm,
				    struct drm_i915_private *dev_priv)
{
	drm_mm_init(&vm->mm, vm->start, vm->total);
	vm->dev = &dev_priv->drm;
	INIT_LIST_HEAD(&vm->active_list);
	INIT_LIST_HEAD(&vm->inactive_list);
	list_add_tail(&vm->global_link, &dev_priv->vm_list);
}

static void gtt_write_workarounds(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* This function is for gtt related workarounds. This function is
	 * called on driver load and after a GPU reset, so you can place
	 * workarounds here even if they get overwritten by GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
	if (IS_BROADWELL(dev))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(dev))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_SKYLAKE(dev))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
	else if (IS_BROXTON(dev))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}

static int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret = 0;

	ret = __hw_ppgtt_init(dev, ppgtt);
	if (ret == 0) {
		kref_init(&ppgtt->ref);
		i915_address_space_init(&ppgtt->base, dev_priv);
	}

	return ret;
}

int i915_ppgtt_init_hw(struct drm_device *dev)
{
	gtt_write_workarounds(dev);

	/* In the case of execlists, PPGTT is enabled by the context descriptor
	 * and the PDPs are contained within the context itself.  We don't
	 * need to do anything here. */
	if (i915.enable_execlists)
		return 0;

	if (!USES_PPGTT(dev))
		return 0;

	if (IS_GEN6(dev))
		gen6_ppgtt_enable(dev);
	else if (IS_GEN7(dev))
		gen7_ppgtt_enable(dev);
	else if (INTEL_INFO(dev)->gen >= 8)
		gen8_ppgtt_enable(dev);
	else
		MISSING_CASE(INTEL_INFO(dev)->gen);

	return 0;
}

struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = i915_ppgtt_init(dev, ppgtt);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	ppgtt->file_priv = fpriv;

	trace_i915_ppgtt_create(&ppgtt->base);

	return ppgtt;
}

void i915_ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	trace_i915_ppgtt_release(&ppgtt->base);

	/* vmas should already be unbound */
	WARN_ON(!list_empty(&ppgtt->base.active_list));
	WARN_ON(!list_empty(&ppgtt->base.inactive_list));

	list_del(&ppgtt->base.global_link);
	drm_mm_takedown(&ppgtt->base.mm);

	ppgtt->base.cleanup(&ppgtt->base);
	kfree(ppgtt);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(ggtt->do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gem_wait_for_idle(dev_priv)) {
			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (unlikely(ggtt->do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}
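
/*
 * Walk the per-engine fault registers, report any pending fault (address,
 * address space, source ID and type) and clear the valid bit, so that stale
 * faults do not linger across the suspend/restore paths below.
 */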

void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	if (INTEL_INFO(dev_priv)->gen < 6)
		return;

	for_each_engine(engine, dev_priv) {
		u32 fault_reg;
		fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (fault_reg & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault_reg & PAGE_MASK,
					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault_reg),
					 RING_FAULT_FAULT_TYPE(fault_reg));
			I915_WRITE(RING_FAULT_REG(engine),
				   fault_reg & ~RING_FAULT_VALID);
		}
	}
	POSTING_READ(RING_FAULT_REG(&dev_priv->engine[RCS]));
}

static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen < 6) {
		intel_gtt_chipset_flush();
	} else {
		I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
		POSTING_READ(GFX_FLSH_CNTL_GEN6);
	}
}

void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	/* Don't bother messing with faults pre GEN6 as we have little
	 * documentation supporting that it's a good idea.
	 */
	if (INTEL_INFO(dev)->gen < 6)
		return;

	i915_check_and_clear_faults(dev_priv);

	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
			     true);

	i915_ggtt_flush(dev_priv);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
#ifdef writeq
	writeq(pte, addr);
#else
	iowrite32((u32)pte, addr);
	iowrite32(pte >> 32, addr + 4);
#endif
}

static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  uint64_t offset,
				  enum i915_cache_level level,
				  u32 unused)
{
	struct drm_i915_private *dev_priv = to_i915(vm->dev);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
		(offset >> PAGE_SHIFT);
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	gen8_set_pte(pte, gen8_pte_encode(addr, level, true));

	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 unused)
{
	struct drm_i915_private *dev_priv = to_i915(vm->dev);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct sgt_iter sgt_iter;
	gen8_pte_t __iomem *gtt_entries;
	gen8_pte_t gtt_entry;
	dma_addr_t addr;
	int rpm_atomic_seq;
	int i = 0;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);

	for_each_sgt_dma(addr, sgt_iter, st) {
		gtt_entry = gen8_pte_encode(addr, level, true);
		gen8_set_pte(&gtt_entries[i++], gtt_entry);
	}

	/*
	 * XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
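
/*
 * Note: on Cherryview gen8_gmch_probe() below routes insertions through
 * gen8_ggtt_insert_entries__BKL(), i.e. under stop_machine(), presumably so
 * that no other CPU accesses the aperture while its PTEs are being rewritten.
 */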

struct insert_entries {
	struct i915_address_space *vm;
	struct sg_table *st;
	uint64_t start;
	enum i915_cache_level level;
	u32 flags;
};

static int gen8_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;
	gen8_ggtt_insert_entries(arg->vm, arg->st,
				 arg->start, arg->level, arg->flags);
	return 0;
}

static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					  struct sg_table *st,
					  uint64_t start,
					  enum i915_cache_level level,
					  u32 flags)
{
	struct insert_entries arg = { vm, st, start, level, flags };
	stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
}

static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  uint64_t offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct drm_i915_private *dev_priv = to_i915(vm->dev);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
		(offset >> PAGE_SHIFT);
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	iowrite32(vm->pte_encode(addr, level, true, flags), pte);

	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 flags)
{
	struct drm_i915_private *dev_priv = to_i915(vm->dev);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct sgt_iter sgt_iter;
	gen6_pte_t __iomem *gtt_entries;
	gen6_pte_t gtt_entry;
	dma_addr_t addr;
	int rpm_atomic_seq;
	int i = 0;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);

	for_each_sgt_dma(addr, sgt_iter, st) {
		gtt_entry = vm->pte_encode(addr, level, true, flags);
		iowrite32(gtt_entry, &gtt_entries[i++]);
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

static void nop_clear_range(struct i915_address_space *vm,
			    uint64_t start,
			    uint64_t length,
			    bool use_scratch)
{
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = to_i915(vm->dev);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen8_pte_t scratch_pte, __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = gen8_pte_encode(px_dma(vm->scratch_page),
				      I915_CACHE_LLC,
				      use_scratch);
	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
	readl(gtt_base);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = to_i915(vm->dev);
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(px_dma(vm->scratch_page),
				     I915_CACHE_LLC, use_scratch, 0);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  uint64_t offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	struct drm_i915_private *dev_priv = to_i915(vm->dev);
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *pages,
				     uint64_t start,
				     enum i915_cache_level cache_level, u32 unused)
{
	struct drm_i915_private *dev_priv = to_i915(vm->dev);
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length,
				  bool unused)
{
	struct drm_i915_private *dev_priv = to_i915(vm->dev);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	int rpm_atomic_seq;

	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	intel_gtt_clear_range(first_entry, num_entries);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}

static int ggtt_bind_vma(struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags = 0;
	int ret;

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	/* Currently applicable only to VLV */
	if (obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->ggtt_view.pages,
				vma->node.start,
				cache_level, pte_flags);

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	vma->bound |= GLOBAL_BIND | LOCAL_BIND;

	return 0;
}

static int aliasing_gtt_bind_vma(struct i915_vma *vma,
				 enum i915_cache_level cache_level,
				 u32 flags)
{
	u32 pte_flags;
	int ret;

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	if (flags & GLOBAL_BIND) {
		vma->vm->insert_entries(vma->vm,
					vma->ggtt_view.pages,
					vma->node.start,
					cache_level, pte_flags);
	}

	if (flags & LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt =
			to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
		appgtt->base.insert_entries(&appgtt->base,
					    vma->ggtt_view.pages,
					    vma->node.start,
					    cache_level, pte_flags);
	}

	return 0;
}

static void ggtt_unbind_vma(struct i915_vma *vma)
{
	struct drm_device *dev = vma->vm->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = vma->obj;
	const uint64_t size = min_t(uint64_t,
				    obj->base.size,
				    vma->node.size);

	if (vma->bound & GLOBAL_BIND) {
		vma->vm->clear_range(vma->vm,
				     vma->node.start,
				     size,
				     true);
	}

	if (dev_priv->mm.aliasing_ppgtt && vma->bound & LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;

		appgtt->base.clear_range(&appgtt->base,
					 vma->node.start,
					 size,
					 true);
	}
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	bool interruptible;

	interruptible = do_idling(dev_priv);

	dma_unmap_sg(&dev->pdev->dev, obj->pages->sgl, obj->pages->nents,
		     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}
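
/*
 * drm_mm colour adjustment: a node whose cache colour differs from its
 * neighbour is kept one page (4096 bytes) apart by nudging the start or end
 * of the hole under consideration.  The callback is installed only on
 * !HAS_LLC platforms (see i915_gem_setup_global_gtt()).
 */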

static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  u64 *start,
				  u64 *end)
{
	if (node->color != color)
		*start += 4096;

	node = list_first_entry_or_null(&node->node_list,
					struct drm_mm_node,
					node_list);
	if (node && node->allocated && node->color != color)
		*end -= 4096;
}

static int i915_gem_setup_global_gtt(struct drm_device *dev,
				     u64 start,
				     u64 mappable_end,
				     u64 end)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;
	int ret;

	BUG_ON(mappable_end > end);

	ggtt->base.start = start;

	/* Subtract the guard page before address space initialization to
	 * shrink the range used by drm_mm */
	ggtt->base.total = end - start - PAGE_SIZE;
	i915_address_space_init(&ggtt->base, dev_priv);
	ggtt->base.total += PAGE_SIZE;

	ret = intel_vgt_balloon(dev_priv);
	if (ret)
		return ret;

	if (!HAS_LLC(dev))
		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj, &ggtt->base);

		DRM_DEBUG_KMS("reserving preallocated space: %llx + %zx\n",
			      i915_gem_obj_ggtt_offset(obj), obj->base.size);

		WARN_ON(i915_gem_obj_ggtt_bound(obj));
		ret = drm_mm_reserve_node(&ggtt->base.mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
			return ret;
		}
		vma->bound |= GLOBAL_BIND;
		__i915_vma_set_map_and_fenceable(vma);
		list_add_tail(&vma->vm_link, &ggtt->base.inactive_list);
	}

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt->base.clear_range(&ggtt->base, hole_start,
				     hole_end - hole_start, true);
	}

	/* And finally clear the reserved guard page */
	ggtt->base.clear_range(&ggtt->base, end - PAGE_SIZE, PAGE_SIZE, true);

	if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
		struct i915_hw_ppgtt *ppgtt;

		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
		if (!ppgtt)
			return -ENOMEM;

		ret = __hw_ppgtt_init(dev, ppgtt);
		if (ret) {
			ppgtt->base.cleanup(&ppgtt->base);
			kfree(ppgtt);
			return ret;
		}

		if (ppgtt->base.allocate_va_range)
			ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
							    ppgtt->base.total);
		if (ret) {
			ppgtt->base.cleanup(&ppgtt->base);
			kfree(ppgtt);
			return ret;
		}

		ppgtt->base.clear_range(&ppgtt->base,
					ppgtt->base.start,
					ppgtt->base.total,
					true);

		dev_priv->mm.aliasing_ppgtt = ppgtt;
		WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
		ggtt->base.bind_vma = aliasing_gtt_bind_vma;
	}

	return 0;
}

/**
 * i915_gem_init_ggtt - Initialize GEM for Global GTT
 * @dev: DRM device
 */
void i915_gem_init_ggtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	i915_gem_setup_global_gtt(dev, 0, ggtt->mappable_end, ggtt->base.total);
}

/**
 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
 * @dev: DRM device
 */
void i915_ggtt_cleanup_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		ppgtt->base.cleanup(&ppgtt->base);
	}

	i915_gem_cleanup_stolen(dev);

	if (drm_mm_initialized(&ggtt->base.mm)) {
		intel_vgt_deballoon(dev_priv);

		drm_mm_takedown(&ggtt->base.mm);
		list_del(&ggtt->base.global_link);
	}

	ggtt->base.cleanup(&ggtt->base);
}
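
/*
 * The helpers below decode the GMCH control word into the size of the GTT
 * itself (in bytes of PTEs) and the size of stolen memory.  As a rough
 * illustration, assuming 4 KiB pages: a 2 MiB gen6 GTT of 4-byte PTEs maps
 * 512K pages, i.e. a 2 GiB aperture, while a gen8 GTT of the same size uses
 * 8-byte PTEs and therefore maps half as much.  The worked numbers are
 * examples, not values taken from hardware documentation.
 */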

static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}

static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
	return bdw_gmch_ctl << 25; /* 32 MB units */
}
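
/*
 * Worked examples for the CHV/gen9 stolen-memory decode below (illustrative
 * only): on CHV a GMS value of 0x11 yields (0x11 - 0x11 + 2) << 22 = 8 MB and
 * 0x17 yields (0x17 - 0x17 + 9) << 22 = 36 MB, matching the increments listed
 * in the comment; on gen9 a value below 0xf0 is simply a count of 32 MB units.
 */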

static size_t chv_get_stolen_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;

	/*
	 * 0x0  to 0x10: 32MB increments starting at 0MB
	 * 0x11 to 0x16: 4MB increments starting at 8MB
	 * 0x17 to 0x1d: 4MB increments start at 36MB
	 */
	if (gmch_ctrl < 0x11)
		return gmch_ctrl << 25;
	else if (gmch_ctrl < 0x17)
		return (gmch_ctrl - 0x11 + 2) << 22;
	else
		return (gmch_ctrl - 0x17 + 9) << 22;
}

static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
{
	gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;

	if (gen9_gmch_ctl < 0xf0)
		return gen9_gmch_ctl << 25; /* 32 MB units */
	else
		/* 4MB increments starting at 0xf0 for 4MB */
		return (gen9_gmch_ctl - 0xf0 + 1) << 22;
}

static int ggtt_probe_common(struct drm_device *dev,
			     size_t gtt_size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_page_scratch *scratch_page;
	phys_addr_t ggtt_phys_addr;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	ggtt_phys_addr = pci_resource_start(dev->pdev, 0) +
			 (pci_resource_len(dev->pdev, 0) / 2);

	/*
	 * On BXT writes larger than 64 bit to the GTT pagetable range will be
	 * dropped. For WC mappings in general we have 64 byte burst writes
	 * when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_BROXTON(dev))
		ggtt->gsm = ioremap_nocache(ggtt_phys_addr, gtt_size);
	else
		ggtt->gsm = ioremap_wc(ggtt_phys_addr, gtt_size);
	if (!ggtt->gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	scratch_page = alloc_scratch_page(dev);
	if (IS_ERR(scratch_page)) {
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return PTR_ERR(scratch_page);
	}

	ggtt->base.scratch_page = scratch_page;

	return 0;
}

/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases. */
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	if (!USES_PPGTT(dev_priv))
		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
		 * so RTL will always use the value corresponding to
		 * pat_sel = 000".
		 * So let's disable cache for GGTT to avoid screen corruptions.
		 * MOCS still can be used though.
		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
		 * before this patch, i.e. the same uncached + snooping access
		 * like on gen6/7 seems to be in effect.
		 * - So this just fixes blitter/render access. Again it looks
		 * like it's not just uncached access, but uncached + snooping.
		 * So we can still hold onto all our assumptions wrt cpu
		 * clflushing on LLC machines.
		 */
		pat = GEN8_PPAT(0, GEN8_PPAT_UC);

	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
	 * write would work. */
	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}

static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */
	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}

static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_device *dev = ggtt->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u16 snb_gmch_ctl;
	int ret;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
	ggtt->mappable_end = pci_resource_len(dev->pdev, 2);

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));

	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	if (INTEL_INFO(dev)->gen >= 9) {
		ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
		ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
	} else if (IS_CHERRYVIEW(dev)) {
		ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
		ggtt->size = chv_get_total_gtt_size(snb_gmch_ctl);
	} else {
		ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
		ggtt->size = gen8_get_total_gtt_size(snb_gmch_ctl);
	}

	ggtt->base.total = (ggtt->size / sizeof(gen8_pte_t)) << PAGE_SHIFT;

	if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
		chv_setup_private_ppat(dev_priv);
	else
		bdw_setup_private_ppat(dev_priv);

	ret = ggtt_probe_common(dev, ggtt->size);

	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.insert_page = gen8_ggtt_insert_page;
	ggtt->base.clear_range = nop_clear_range;
	if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
		ggtt->base.clear_range = gen8_ggtt_clear_range;

	ggtt->base.insert_entries = gen8_ggtt_insert_entries;
	if (IS_CHERRYVIEW(dev_priv))
		ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;

	return ret;
}

static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_device *dev = ggtt->base.dev;
	u16 snb_gmch_ctl;
	int ret;

	ggtt->mappable_base = pci_resource_start(dev->pdev, 2);
	ggtt->mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((ggtt->mappable_end < (64<<20) || (ggtt->mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
	ggtt->size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->base.total = (ggtt->size / sizeof(gen6_pte_t)) << PAGE_SHIFT;

	ret = ggtt_probe_common(dev, ggtt->size);

	ggtt->base.clear_range = gen6_ggtt_clear_range;
	ggtt->base.insert_page = gen6_ggtt_insert_page;
	ggtt->base.insert_entries = gen6_ggtt_insert_entries;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;

	return ret;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = container_of(vm, struct i915_ggtt, base);

	iounmap(ggtt->gsm);
	free_scratch_page(vm->dev, vm->scratch_page);
}

static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_device *dev = ggtt->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
		      &ggtt->mappable_base, &ggtt->mappable_end);

	ggtt->do_idle_maps = needs_idle_maps(&dev_priv->drm);
	ggtt->base.insert_page = i915_ggtt_insert_page;
	ggtt->base.insert_entries = i915_ggtt_insert_entries;
	ggtt->base.clear_range = i915_ggtt_clear_range;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;

	if (unlikely(ggtt->do_idle_maps))
		DRM_INFO("applying Ironlake quirks for intel_iommu\n");

	return 0;
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @dev: DRM device
 */
int i915_ggtt_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		ggtt->probe = i915_gmch_probe;
		ggtt->base.cleanup = i915_gmch_remove;
	} else if (INTEL_INFO(dev)->gen < 8) {
		ggtt->probe = gen6_gmch_probe;
		ggtt->base.cleanup = gen6_gmch_remove;

		if (HAS_EDRAM(dev))
			ggtt->base.pte_encode = iris_pte_encode;
		else if (IS_HASWELL(dev))
			ggtt->base.pte_encode = hsw_pte_encode;
		else if (IS_VALLEYVIEW(dev))
			ggtt->base.pte_encode = byt_pte_encode;
		else if (INTEL_INFO(dev)->gen >= 7)
			ggtt->base.pte_encode = ivb_pte_encode;
		else
			ggtt->base.pte_encode = snb_pte_encode;
	} else {
		ggtt->probe = gen8_gmch_probe;
		ggtt->base.cleanup = gen6_gmch_remove;
	}

	ggtt->base.dev = dev;
	ggtt->base.is_ggtt = true;

	ret = ggtt->probe(ggtt);
	if (ret)
		return ret;

	if ((ggtt->base.total - 1) >> 32) {
		DRM_ERROR("We never expected a Global GTT with more than 32bits"
			  "of address space! Found %lldM!\n",
			  ggtt->base.total >> 20);
		ggtt->base.total = 1ULL << 32;
		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
	}

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev);
	if (ret)
		goto out_gtt_cleanup;

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %lluM\n",
		 ggtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		DRM_INFO("VT-d active for gfx access\n");
#endif

	return 0;

out_gtt_cleanup:
	ggtt->base.cleanup(&ggtt->base);

	return ret;
}

int i915_ggtt_enable_hw(struct drm_device *dev)
{
	if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
		return -EIO;

	return 0;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;

	i915_check_and_clear_faults(dev_priv);

	/* First fill our portion of the GTT with scratch pages */
	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
			       true);

	/* Cache flush objects bound into GGTT and rebind them. */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (vma->vm != &ggtt->base)
				continue;

			WARN_ON(i915_vma_bind(vma, obj->cache_level,
					      PIN_UPDATE));
		}

		if (obj->pin_display)
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
	}

	if (INTEL_INFO(dev)->gen >= 8) {
		if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
			chv_setup_private_ppat(dev_priv);
		else
			bdw_setup_private_ppat(dev_priv);

		return;
	}

	if (USES_PPGTT(dev)) {
		struct i915_address_space *vm;

		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			/* TODO: Perhaps it shouldn't be gen6 specific */

			struct i915_hw_ppgtt *ppgtt;

			if (vm->is_ggtt)
				ppgtt = dev_priv->mm.aliasing_ppgtt;
			else
				ppgtt = i915_vm_to_ppgtt(vm);

			gen6_write_page_range(dev_priv, &ppgtt->pd,
					      0, ppgtt->base.total);
		}
	}

	i915_ggtt_flush(dev_priv);
}

static struct i915_vma *
__i915_gem_vma_create(struct drm_i915_gem_object *obj,
		      struct i915_address_space *vm,
		      const struct i915_ggtt_view *ggtt_view)
{
	struct i915_vma *vma;

	if (WARN_ON(i915_is_ggtt(vm) != !!ggtt_view))
		return ERR_PTR(-EINVAL);

	vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
	if (vma == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&vma->vm_link);
	INIT_LIST_HEAD(&vma->obj_link);
	INIT_LIST_HEAD(&vma->exec_list);
	vma->vm = vm;
	vma->obj = obj;
	vma->is_ggtt = i915_is_ggtt(vm);

	if (i915_is_ggtt(vm))
		vma->ggtt_view = *ggtt_view;
	else
		i915_ppgtt_get(i915_vm_to_ppgtt(vm));

	list_add_tail(&vma->obj_link, &obj->vma_list);

	return vma;
}

struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm)
{
	struct i915_vma *vma;

	vma = i915_gem_obj_to_vma(obj, vm);
	if (!vma)
		vma = __i915_gem_vma_create(obj, vm,
					    i915_is_ggtt(vm) ? &i915_ggtt_view_normal : NULL);

	return vma;
}

struct i915_vma *
i915_gem_obj_lookup_or_create_ggtt_vma(struct drm_i915_gem_object *obj,
				       const struct i915_ggtt_view *view)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);

	if (!vma)
		vma = __i915_gem_vma_create(obj, &ggtt->base, view);

	return vma;
}
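
/*
 * rotate_pages() below emits the source pages column by column, starting from
 * the bottom row, which is how the rotated GGTT view is built up.
 * Illustrative walk for width = 2, height = 2, stride = 2 (source page
 * indices 0..3): column 0 emits pages 2 then 0, column 1 emits 3 then 1.
 */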

static struct scatterlist *
rotate_pages(const dma_addr_t *in, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		src_idx = stride * (height - 1) + column;
		for (row = 0; row < height; row++) {
			st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, PAGE_SIZE, 0);
			sg_dma_address(sg) = in[offset + src_idx];
			sg_dma_len(sg) = PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= stride;
		}
	}

	return sg;
}

static struct sg_table *
intel_rotate_fb_obj_pages(struct intel_rotation_info *rot_info,
			  struct drm_i915_gem_object *obj)
{
	const size_t n_pages = obj->base.size / PAGE_SIZE;
	unsigned int size_pages = rot_info->plane[0].width * rot_info->plane[0].height;
	unsigned int size_pages_uv;
	struct sgt_iter sgt_iter;
	dma_addr_t dma_addr;
	unsigned long i;
	dma_addr_t *page_addr_list;
	struct sg_table *st;
	unsigned int uv_start_page;
	struct scatterlist *sg;
	int ret = -ENOMEM;

	/* Allocate a temporary list of source pages for random access. */
	page_addr_list = drm_malloc_gfp(n_pages,
					sizeof(dma_addr_t),
					GFP_TEMPORARY);
	if (!page_addr_list)
		return ERR_PTR(ret);

	/* Account for UV plane with NV12. */
	if (rot_info->pixel_format == DRM_FORMAT_NV12)
		size_pages_uv = rot_info->plane[1].width * rot_info->plane[1].height;
	else
		size_pages_uv = 0;

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size_pages + size_pages_uv, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	/* Populate source page list from the object. */
	i = 0;
	for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
		page_addr_list[i++] = dma_addr;

	GEM_BUG_ON(i != n_pages);
	st->nents = 0;
	sg = st->sgl;

	/* Rotate the pages. */
	sg = rotate_pages(page_addr_list, 0,
			  rot_info->plane[0].width, rot_info->plane[0].height,
			  rot_info->plane[0].width,
			  st, sg);

	/* Append the UV plane if NV12. */
	if (rot_info->pixel_format == DRM_FORMAT_NV12) {
		uv_start_page = size_pages;

		/* Check for tile-row un-alignment. */
		if (offset_in_page(rot_info->uv_offset))
			uv_start_page--;

		rot_info->uv_start_page = uv_start_page;

		sg = rotate_pages(page_addr_list, rot_info->uv_start_page,
				  rot_info->plane[1].width, rot_info->plane[1].height,
				  rot_info->plane[1].width,
				  st, sg);
	}

	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages (%u plane 0)).\n",
		      obj->base.size, rot_info->plane[0].width,
		      rot_info->plane[0].height, size_pages + size_pages_uv,
		      size_pages);

	drm_free_large(page_addr_list);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	drm_free_large(page_addr_list);

	DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%d) (%ux%u tiles, %u pages (%u plane 0))\n",
		      obj->base.size, ret, rot_info->plane[0].width,
		      rot_info->plane[0].height, size_pages + size_pages_uv,
		      size_pages);
	return ERR_PTR(ret);
}

static struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg;
	struct sg_page_iter obj_sg_iter;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	sg = st->sgl;
	st->nents = 0;
	for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
		view->params.partial.offset)
	{
		if (st->nents >= view->params.partial.size)
			break;

		sg_set_page(sg, NULL, PAGE_SIZE, 0);
		sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
		sg_dma_len(sg) = PAGE_SIZE;

		sg = sg_next(sg);
		st->nents++;
	}

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret = 0;

	if (vma->ggtt_view.pages)
		return 0;

	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
		vma->ggtt_view.pages = vma->obj->pages;
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		vma->ggtt_view.pages =
			intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
		vma->ggtt_view.pages =
			intel_partial_pages(&vma->ggtt_view, vma->obj);
	else
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);

	if (!vma->ggtt_view.pages) {
		DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
			  vma->ggtt_view.type);
		ret = -EINVAL;
	} else if (IS_ERR(vma->ggtt_view.pages)) {
		ret = PTR_ERR(vma->ggtt_view.pages);
		vma->ggtt_view.pages = NULL;
		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
			  vma->ggtt_view.type, ret);
	}

	return ret;
}

/**
 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
 * @vma: VMA to map
 * @cache_level: mapping cache level
 * @flags: flags like global or local mapping
 *
 * DMA addresses are taken from the scatter-gather table of this object (or of
 * this VMA in case of non-default GGTT views) and PTE entries set up.
 * Note that DMA addresses are also the only part of the SG table we care about.
 */
int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
		  u32 flags)
{
	int ret;
	u32 bind_flags;

	if (WARN_ON(flags == 0))
		return -EINVAL;

	bind_flags = 0;
	if (flags & PIN_GLOBAL)
		bind_flags |= GLOBAL_BIND;
	if (flags & PIN_USER)
		bind_flags |= LOCAL_BIND;

	if (flags & PIN_UPDATE)
		bind_flags |= vma->bound;
	else
		bind_flags &= ~vma->bound;

	if (bind_flags == 0)
		return 0;

	if (vma->bound == 0 && vma->vm->allocate_va_range) {
		/* XXX: i915_vma_pin() will fix this +- hack */
		vma->pin_count++;
		trace_i915_va_alloc(vma);
		ret = vma->vm->allocate_va_range(vma->vm,
						 vma->node.start,
						 vma->node.size);
		vma->pin_count--;
		if (ret)
			return ret;
	}

	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

	vma->bound |= bind_flags;

	return 0;
}

/**
 * i915_ggtt_view_size - Get the size of a GGTT view.
 * @obj: Object the view is of.
 * @view: The view in question.
 *
 * @return The size of the GGTT view in bytes.
 */
size_t
i915_ggtt_view_size(struct drm_i915_gem_object *obj,
		    const struct i915_ggtt_view *view)
{
	if (view->type == I915_GGTT_VIEW_NORMAL) {
		return obj->base.size;
	} else if (view->type == I915_GGTT_VIEW_ROTATED) {
		return intel_rotation_info_size(&view->params.rotated) << PAGE_SHIFT;
	} else if (view->type == I915_GGTT_VIEW_PARTIAL) {
		return view->params.partial.size << PAGE_SHIFT;
	} else {
		WARN_ONCE(1, "GGTT view %u not implemented!\n", view->type);
		return obj->base.size;
	}
}
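
/*
 * Map the vma's GGTT range through the mappable aperture as write-combined
 * and cache the result in vma->iomap.  The mapping requires the vma to be
 * bound in the GGTT and map_and_fenceable, and it takes an extra pin_count
 * reference which the caller is expected to release once done with the
 * mapping.
 */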

void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
{
	void __iomem *ptr;

	lockdep_assert_held(&vma->vm->dev->struct_mutex);
	if (WARN_ON(!vma->obj->map_and_fenceable))
		return IO_ERR_PTR(-ENODEV);

	GEM_BUG_ON(!vma->is_ggtt);
	GEM_BUG_ON((vma->bound & GLOBAL_BIND) == 0);

	ptr = vma->iomap;
	if (ptr == NULL) {
		ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
					vma->node.start,
					vma->node.size);
		if (ptr == NULL)
			return IO_ERR_PTR(-ENOMEM);

		vma->iomap = ptr;
	}

	vma->pin_count++;
	return ptr;
}