/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))

/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/* Cacheability Control is a 4-bit value. The low three bits are stored in *
 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)

static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		WARN_ON(1);
	}

	return pte;
}

static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		WARN_ON(1);
	}

	return pte;
}

#define BYT_PTE_WRITEABLE		(1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)

static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	/* Mark the page as writeable.  Other platforms don't have a
	 * setting for read-only/writable, so this matches that behavior.
	 */
	pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
				      enum i915_cache_level level,
				      bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE0;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE0;
		break;
	}

	return pte;
}

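/* Write each page table's DMA address into the PPGTT page directory, which
 * lives in the global GTT at ppgtt->pd_offset, marking every PDE valid.
 */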
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	WARN_ON(ppgtt->pd_offset & 0x3f);
	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);
}

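/* Enable the aliasing PPGTT: write out the page directory, set the per-gen
 * ECOBITS/ECOCHK bits and point every ring's PP_DIR_BASE at the directory.
 */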
static int gen6_ppgtt_enable(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	BUG_ON(ppgtt->pd_offset & 0x3f);

	gen6_write_pdes(ppgtt);

	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines, */
	pd_offset <<= 16;

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
					 ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		uint32_t ecochk, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

		ecochk = I915_READ(GAM_ECOCHK);
		if (IS_HASWELL(dev)) {
			ecochk |= ECOCHK_PPGTT_WB_HSW;
		} else {
			ecochk |= ECOCHK_PPGTT_LLC_IVB;
			ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
		}
		I915_WRITE(GAM_ECOCHK, ecochk);
		/* GFX_MODE is per-ring on gen7+ */
	}

	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}
	return 0;
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   unsigned first_entry,
				   unsigned num_entries,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

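/* Write one PTE per page of the object's sg list into the PPGTT page
 * tables, switching to the next kmap'ed page table whenever the current
 * one (I915_PPGTT_PT_ENTRIES PTEs) fills up.
 */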
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      unsigned first_entry,
				      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		dma_addr_t page_addr;

		page_addr = sg_page_iter_dma_address(&sg_iter);
		pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			act_pt++;
			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
			act_pte = 0;

		}
	}
	kunmap_atomic(pt_vaddr);
}

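/* Unmap and free every PPGTT page table, then free the PPGTT itself. */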
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	int i;

	drm_mm_takedown(&ppgtt->base.mm);

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->base.dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

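/* Allocate and DMA-map the page tables for a single aliasing PPGTT and
 * fill in the gen6 address-space vfuncs.
 */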
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);

	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
	ppgtt->enable = gen6_ppgtt_enable;
	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		return -ENOMEM;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr)
		goto err_pt_alloc;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			ret = -EIO;
			goto err_pd_pin;

		}
		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	ppgtt->base.clear_range(&ppgtt->base, 0,
				ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);

	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);

	return ret;
}

static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	ppgtt->base.dev = dev;

	if (INTEL_INFO(dev)->gen < 8)
		ret = gen6_ppgtt_init(ppgtt);
	else if (IS_GEN8(dev))
		ret = -ENOSYS;
	else
		BUG();

	if (ret)
		kfree(ppgtt);
	else {
		dev_priv->mm.aliasing_ppgtt = ppgtt;
		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
			    ppgtt->base.total);
	}

	return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	if (!ppgtt)
		return;

	ppgtt->base.cleanup(&ppgtt->base);
	dev_priv->mm.aliasing_ppgtt = NULL;
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
				   i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
				   cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	ppgtt->base.clear_range(&ppgtt->base,
				i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
				obj->base.size >> PAGE_SHIFT,
				true);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

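/* Check each ring's fault register and clear any fault still flagged as
 * valid, so stale faults do not linger across a reset or resume.
 */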
void i915_check_and_clear_faults(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	if (INTEL_INFO(dev)->gen < 6)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 fault_reg;
		fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (fault_reg & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault_reg & PAGE_MASK,
					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault_reg),
					 RING_FAULT_FAULT_TYPE(fault_reg));
			I915_WRITE(RING_FAULT_REG(ring),
				   fault_reg & ~RING_FAULT_VALID);
		}
	}
	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
}

void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Don't bother messing with faults pre GEN6 as we have little
	 * documentation supporting that it's a good idea.
	 */
	if (INTEL_INFO(dev)->gen < 6)
		return;

	i915_check_and_clear_faults(dev);

	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start / PAGE_SIZE,
				       dev_priv->gtt.base.total / PAGE_SIZE,
				       false);
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	i915_check_and_clear_faults(dev);

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start / PAGE_SIZE,
				       dev_priv->gtt.base.total / PAGE_SIZE,
				       true);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		i915_gem_clflush_object(obj, obj->pin_display);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	i915_gem_chipset_flush(dev);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     unsigned int first_entry,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1]) !=
			vm->pte_encode(addr, level, true));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

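/* Overwrite num_entries GGTT PTEs, starting at first_entry, with a
 * scratch-page PTE; the valid bit is set only when use_scratch is true.
 */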
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}


static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     unsigned int pg_start,
				     enum i915_cache_level cache_level)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(st, pg_start, flags);

}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries,
				  bool unused)
{
	intel_gtt_clear_range(first_entry, num_entries);
}


void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;

	dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
					  entry,
					  cache_level);

	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;

	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       entry,
				       obj->base.size >> PAGE_SHIFT,
				       true);

	obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

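/* Nudge the start/end of a hole so that a one page gap separates
 * neighbouring GTT nodes of differing cache colour.
 */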
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}
void i915_gem_setup_global_gtt(struct drm_device *dev,
			       unsigned long start,
			       unsigned long mappable_end,
			       unsigned long end)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
		int ret;
		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
			      i915_gem_obj_ggtt_offset(obj), obj->base.size);

		WARN_ON(i915_gem_obj_ggtt_bound(obj));
		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
		if (ret)
			DRM_DEBUG_KMS("Reservation failed\n");
		obj->has_global_gtt_mapping = 1;
		list_add(&vma->vma_link, &obj->vma_list);
	}

	dev_priv->gtt.base.start = start;
	dev_priv->gtt.base.total = end - start;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
		const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
	}

	/* And finally clear the reserved guard page */
	ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
}

static bool
intel_enable_ppgtt(struct drm_device *dev)
{
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.base.total;
	mappable_size = dev_priv->gtt.mappable_end;

	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
		int ret;

		if (INTEL_INFO(dev)->gen <= 7) {
			/* PPGTT pdes are stolen from global gtt ptes, so shrink the
			 * aperture accordingly when using aliasing ppgtt. */
			gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
		}

		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);
		if (!ret)
			return;

		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
		drm_mm_takedown(&dev_priv->gtt.base.mm);
		gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
	}
	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

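/* Allocate a single uncached page for unused GTT and PPGTT entries to
 * point at, and record its bus address in the GGTT address space.
 */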
static int setup_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr))
		return -EINVAL;
#else
	dma_addr = page_to_phys(page);
#endif
	dev_priv->gtt.base.scratch.page = page;
	dev_priv->gtt.base.scratch.addr = dma_addr;

	return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page = dev_priv->gtt.base.scratch.page;

	set_pages_wb(page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(page);
	__free_page(page);
}

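/* Decode the size, in bytes, of the GGTT page table itself from the GGMS
 * field of the GMCH control word.
 */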
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;
	return bdw_gmch_ctl << 20;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
	return bdw_gmch_ctl << 25; /* 32 MB units */
}

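/* Map the GGTT page table, which sits in the upper half of gen6+ PCI BAR 0,
 * and set up the scratch page shared by all address spaces.
 */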
static int ggtt_probe_common(struct drm_device *dev,
			     size_t gtt_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_bus_addr;
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret) {
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(dev_priv->gtt.gsm);
	}

	return ret;
}

static int gen8_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));

	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	*stolen = gen8_get_stolen_size(snb_gmch_ctl);

	gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
	*gtt_total = (gtt_size / 8) << PAGE_SHIFT;

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = NULL;
	dev_priv->gtt.base.insert_entries = NULL;

	return ret;
}

static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	*stolen = gen6_get_stolen_size(snb_gmch_ctl);

	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

	ret = ggtt_probe_common(dev, gtt_size);

	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;

	return ret;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{

	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
	iounmap(gtt->gsm);
	teardown_scratch_page(vm->dev);
}

static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;

	return 0;
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

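/* Top-level GTT setup: pick the vfuncs for this generation, then let the
 * probe hook report the GTT size, stolen size and mappable aperture.
 */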
int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		gtt->gtt_probe = i915_gmch_probe;
		gtt->base.cleanup = i915_gmch_remove;
	} else if (INTEL_INFO(dev)->gen < 8) {
		gtt->gtt_probe = gen6_gmch_probe;
		gtt->base.cleanup = gen6_gmch_remove;
		if (IS_HASWELL(dev) && dev_priv->ellc_size)
			gtt->base.pte_encode = iris_pte_encode;
		else if (IS_HASWELL(dev))
			gtt->base.pte_encode = hsw_pte_encode;
		else if (IS_VALLEYVIEW(dev))
			gtt->base.pte_encode = byt_pte_encode;
		else if (INTEL_INFO(dev)->gen >= 7)
			gtt->base.pte_encode = ivb_pte_encode;
		else
			gtt->base.pte_encode = snb_pte_encode;
	} else {
		dev_priv->gtt.gtt_probe = gen8_gmch_probe;
		dev_priv->gtt.base.cleanup = gen6_gmch_remove;
	}

	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
			     &gtt->mappable_base, &gtt->mappable_end);
	if (ret)
		return ret;

	gtt->base.dev = dev;

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 gtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);

	return 0;
}