i915_gem_gtt.c
/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

#define GEN6_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES (PAGE_SIZE / sizeof(gen6_gtt_pte_t))

/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
#define HSW_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0x7f0))

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ has bits 11-4 for physical addr bits 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN7_PTE_CACHE_L3_LLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
#define HSW_PTE_ADDR_ENCODE(addr)	HSW_GTT_ADDR_ENCODE(addr)

/* Cacheability Control is a 4-bit value. The low three bits are stored in
 * bits 3:1 of the PTE, while the fourth bit is stored in bit 11 of the PTE.
 */
#define HSW_CACHEABILITY_CONTROL(bits)	((((bits) & 0x7) << 1) | \
					 (((bits) & 0x8) << (11 - 3)))
#define HSW_WB_LLC_AGE3			HSW_CACHEABILITY_CONTROL(0x2)
#define HSW_WB_LLC_AGE0			HSW_CACHEABILITY_CONTROL(0x3)
#define HSW_WB_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0xb)
#define HSW_WT_ELLC_LLC_AGE0		HSW_CACHEABILITY_CONTROL(0x6)

static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		WARN_ON(1);
	}

	return pte;
}

static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		WARN_ON(1);
	}

	return pte;
}

#define BYT_PTE_WRITEABLE		(1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)

static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	/* Mark the page as writeable.  Other platforms don't have a
	 * setting for read-only/writable, so this matches that behavior.
	 */
	pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
				     enum i915_cache_level level,
				     bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
				      enum i915_cache_level level,
				      bool valid)
{
	gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE0;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE0;
		break;
	}

	return pte;
}

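/* Write the PPGTT page directory entries through the GGTT mapping (gsm):
 * one valid PDE per page table, encoding that table's DMA address. The
 * trailing readl() serves as a posting read.
 */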
static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	WARN_ON(ppgtt->pd_offset & 0x3f);
	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);
}

static int gen6_ppgtt_enable(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	BUG_ON(ppgtt->pd_offset & 0x3f);

	gen6_write_pdes(ppgtt);

	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines, */
	pd_offset <<= 16;

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
					 ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		uint32_t ecochk, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

		ecochk = I915_READ(GAM_ECOCHK);
		if (IS_HASWELL(dev)) {
			ecochk |= ECOCHK_PPGTT_WB_HSW;
		} else {
			ecochk |= ECOCHK_PPGTT_LLC_IVB;
			ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
		}
		I915_WRITE(GAM_ECOCHK, ecochk);
		/* GFX_MODE is per-ring on gen7+ */
	}

	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}
	return 0;
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   unsigned first_entry,
				   unsigned num_entries,
				   bool use_scratch)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

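/* Map the pages of an sg_table into the PPGTT, kmapping one page table at a
 * time and encoding each DMA address with the per-platform pte_encode hook.
 */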
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      unsigned first_entry,
				      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	gen6_gtt_pte_t *pt_vaddr;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		dma_addr_t page_addr;

		page_addr = sg_page_iter_dma_address(&sg_iter);
		pt_vaddr[act_pte] = vm->pte_encode(page_addr, cache_level, true);
		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			act_pt++;
			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
			act_pte = 0;

		}
	}
	kunmap_atomic(pt_vaddr);
}

static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(vm, struct i915_hw_ppgtt, base);
	int i;

	drm_mm_takedown(&ppgtt->base.mm);

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->base.dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

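/* Allocate and DMA-map the gen6 PPGTT page tables and wire up the gen6
 * address-space hooks. The page directory is carved out of the tail of the
 * global GTT (see pd_offset below).
 */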
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt = gtt_total_entries(dev_priv->gtt);

	ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
	ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
	ppgtt->enable = gen6_ppgtt_enable;
	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.scratch = dev_priv->gtt.base.scratch;
	ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		return -ENOMEM;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr)
		goto err_pt_alloc;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			ret = -EIO;
			goto err_pd_pin;

		}
		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	ppgtt->base.clear_range(&ppgtt->base, 0,
				ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES, true);

	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);

	return ret;
}

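/* Create the single aliasing PPGTT that shadows the global GTT. Only the
 * gen6/7 path is implemented here, hence the BUG() for gen8+.
 */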
static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	ppgtt->base.dev = dev;

	if (INTEL_INFO(dev)->gen < 8)
		ret = gen6_ppgtt_init(ppgtt);
	else
		BUG();

	if (ret)
		kfree(ppgtt);
	else {
		dev_priv->mm.aliasing_ppgtt = ppgtt;
		drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
			    ppgtt->base.total);
	}

	return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	if (!ppgtt)
		return;

	ppgtt->base.cleanup(&ppgtt->base);
	dev_priv->mm.aliasing_ppgtt = NULL;
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	ppgtt->base.insert_entries(&ppgtt->base, obj->pages,
				   i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
				   cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	ppgtt->base.clear_range(&ppgtt->base,
				i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT,
				obj->base.size >> PAGE_SHIFT,
				true);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

void i915_check_and_clear_faults(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring;
	int i;

	if (INTEL_INFO(dev)->gen < 6)
		return;

	for_each_ring(ring, dev_priv, i) {
		u32 fault_reg;
		fault_reg = I915_READ(RING_FAULT_REG(ring));
		if (fault_reg & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault_reg & PAGE_MASK,
					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault_reg),
					 RING_FAULT_FAULT_TYPE(fault_reg));
			I915_WRITE(RING_FAULT_REG(ring),
				   fault_reg & ~RING_FAULT_VALID);
		}
	}
	POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
}

void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Don't bother messing with faults pre GEN6 as we have little
	 * documentation supporting that it's a good idea.
	 */
	if (INTEL_INFO(dev)->gen < 6)
		return;

	i915_check_and_clear_faults(dev);

	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start / PAGE_SIZE,
				       dev_priv->gtt.base.total / PAGE_SIZE,
				       false);
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	i915_check_and_clear_faults(dev);

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       dev_priv->gtt.base.start / PAGE_SIZE,
				       dev_priv->gtt.base.total / PAGE_SIZE,
				       true);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		i915_gem_clflush_object(obj, obj->pin_display);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	i915_gem_chipset_flush(dev);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     unsigned int first_entry,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(vm->pte_encode(addr, level, true), &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1]) !=
			vm->pte_encode(addr, level, true));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries,
				  bool use_scratch)
{
	struct drm_i915_private *dev_priv = vm->dev->dev_private;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}


static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     unsigned int pg_start,
				     enum i915_cache_level cache_level)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(st, pg_start, flags);

}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  unsigned int first_entry,
				  unsigned int num_entries,
				  bool unused)
{
	intel_gtt_clear_range(first_entry, num_entries);
}


void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;

	dev_priv->gtt.base.insert_entries(&dev_priv->gtt.base, obj->pages,
					  entry,
					  cache_level);

	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	const unsigned long entry = i915_gem_obj_ggtt_offset(obj) >> PAGE_SHIFT;

	dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
				       entry,
				       obj->base.size >> PAGE_SHIFT,
				       true);

	obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

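/* drm_mm color_adjust callback for the GGTT: when a neighbouring node has a
 * different cache "color", shrink the usable range by a page on that side so
 * differently-cached objects are never placed back to back (used on non-LLC
 * platforms, see i915_gem_setup_global_gtt()).
 */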
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}
void i915_gem_setup_global_gtt(struct drm_device *dev,
			       unsigned long start,
			       unsigned long mappable_end,
			       unsigned long end)
{
	/* Let GEM manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
		struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
		int ret;
		DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
			      i915_gem_obj_ggtt_offset(obj), obj->base.size);

		WARN_ON(i915_gem_obj_ggtt_bound(obj));
		ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
		if (ret)
			DRM_DEBUG_KMS("Reservation failed\n");
		obj->has_global_gtt_mapping = 1;
		list_add(&vma->vma_link, &obj->vma_list);
	}

	dev_priv->gtt.base.start = start;
	dev_priv->gtt.base.total = end - start;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
		const unsigned long count = (hole_end - hole_start) / PAGE_SIZE;
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt_vm->clear_range(ggtt_vm, hole_start / PAGE_SIZE, count, true);
	}

	/* And finally clear the reserved guard page */
	ggtt_vm->clear_range(ggtt_vm, end / PAGE_SIZE - 1, 1, true);
}

static bool
intel_enable_ppgtt(struct drm_device *dev)
{
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.base.total;
	mappable_size = dev_priv->gtt.mappable_end;

	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
		int ret;

		if (INTEL_INFO(dev)->gen <= 7) {
			/* PPGTT pdes are stolen from global gtt ptes, so shrink the
			 * aperture accordingly when using aliasing ppgtt. */
			gtt_size -= GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
		}

		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);
		if (!ret)
			return;

		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
		drm_mm_takedown(&dev_priv->gtt.base.mm);
		gtt_size += GEN6_PPGTT_PD_ENTRIES * PAGE_SIZE;
	}
	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

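/* Allocate a single zeroed, uncached page that unused GTT/PPGTT entries can
 * point at, so stray accesses never hit real memory.
 */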
static int setup_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr))
		return -EINVAL;
#else
	dma_addr = page_to_phys(page);
#endif
	dev_priv->gtt.base.scratch.page = page;
	dev_priv->gtt.base.scratch.addr = dma_addr;

	return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page = dev_priv->gtt.base.scratch.page;

	set_pages_wb(page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(page);
	__free_page(page);
}

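/* Decode the GGMS field of the SNB GMCH control word into the size of the
 * GTT (the PTE array) in bytes; the field counts 1MB units.
 */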
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

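/* Probe the gen6+ global GTT: GMADR comes from BAR 2, the GTT and stolen
 * sizes from the GMCH control word, and the PTEs are mapped from the upper
 * half of BAR 0.
 */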
static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_bus_addr;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);

	*stolen = gen6_get_stolen_size(snb_gmch_ctl);
	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret)
		DRM_ERROR("Scratch setup failed\n");

	dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;

	return ret;
}

static void gen6_gmch_remove(struct i915_address_space *vm)
{

	struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
	iounmap(gtt->gsm);
	teardown_scratch_page(vm->dev);
}

static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
	dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;

	return 0;
}

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

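/* Top-level GTT initialisation: pick the probe/cleanup hooks and PTE encoder
 * for this generation, then probe the sizes and report them.
 */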
int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		gtt->gtt_probe = i915_gmch_probe;
		gtt->base.cleanup = i915_gmch_remove;
	} else {
		gtt->gtt_probe = gen6_gmch_probe;
		gtt->base.cleanup = gen6_gmch_remove;
		if (IS_HASWELL(dev) && dev_priv->ellc_size)
			gtt->base.pte_encode = iris_pte_encode;
		else if (IS_HASWELL(dev))
			gtt->base.pte_encode = hsw_pte_encode;
		else if (IS_VALLEYVIEW(dev))
			gtt->base.pte_encode = byt_pte_encode;
		else if (INTEL_INFO(dev)->gen >= 7)
			gtt->base.pte_encode = ivb_pte_encode;
		else
			gtt->base.pte_encode = snb_pte_encode;
	}

	ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
			     &gtt->mappable_base, &gtt->mappable_end);
	if (ret)
		return ret;

	gtt->base.dev = dev;

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 gtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);

	return 0;
}