/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

typedef uint32_t gen6_gtt_pte_t;

/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ has bits 11:4 for physical address bits 39:32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
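
/*
 * Worked example (illustrative only): for the 40-bit dma address
 * 0x1_2345_6000, bits 39:32 (0x01) fold into PTE bits 11:4, so
 * GEN6_GTT_ADDR_ENCODE() yields 0x23456010 in the low 32 bits; OR-ing in
 * GEN6_PTE_VALID and GEN6_PTE_CACHE_LLC gives a final PTE of 0x23456015.
 */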

static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
					     dma_addr_t addr,
					     enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_LLC_MLC:
		/* Haswell doesn't set L3 this way */
		if (IS_HASWELL(dev))
			pte |= GEN6_PTE_CACHE_LLC;
		else
			pte |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		if (IS_HASWELL(dev))
			pte |= HSW_PTE_UNCACHED;
		else
			pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	return pte;
}

/* PPGTT support for Sandybridge/Gen6 and later */
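/*
 * gen6_ppgtt_clear_range() points every PTE in [first_entry, first_entry +
 * num_entries) back at the scratch page, walking one page table
 * (I915_PPGTT_PT_ENTRIES slots) per kmap_atomic() section.
 */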
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = gen6_pte_encode(ppgtt->dev,
				      ppgtt->scratch_page_dma_addr,
				      I915_CACHE_LLC);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

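/*
 * Write one PTE per physical page of the object's sg_table, starting at
 * first_entry; whenever act_pte wraps at the end of a page table, the
 * kmap_atomic() window moves on to the next table.
 */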
static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
				      struct sg_table *pages,
				      unsigned first_entry,
				      enum i915_cache_level cache_level)
{
	gen6_gtt_pte_t *pt_vaddr;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		dma_addr_t page_addr;

		page_addr = sg_page_iter_dma_address(&sg_iter);
		pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
						    cache_level);
		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			act_pt++;
			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
			act_pte = 0;
		}
	}
	kunmap_atomic(pt_vaddr);
}

static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt =
		gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;

	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->clear_range = gen6_ppgtt_clear_range;
	ppgtt->insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->cleanup = gen6_ppgtt_cleanup;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		return -ENOMEM;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) * ppgtt->num_pd_entries,
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr)
		goto err_pt_alloc;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			ret = -EIO;
			goto err_pd_pin;
		}
		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;

	ppgtt->clear_range(ppgtt, 0,
			   ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

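	/* The PD entries were carved out of the top of the global GTT above;
	 * pd_offset is the byte offset of the first one within the GTT. */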
	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);

	return ret;
}

static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	ppgtt->dev = dev;

	ret = gen6_ppgtt_init(ppgtt);
	if (ret)
		kfree(ppgtt);
	else
		dev_priv->mm.aliasing_ppgtt = ppgtt;

	return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	if (!ppgtt)
		return;

	ppgtt->cleanup(ppgtt);
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	ppgtt->insert_entries(ppgtt, obj->pages,
			      obj->gtt_space->start >> PAGE_SHIFT,
			      cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	ppgtt->clear_range(ppgtt,
			   obj->gtt_space->start >> PAGE_SHIFT,
			   obj->base.size >> PAGE_SHIFT);
}

void i915_gem_init_ppgtt(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	if (!dev_priv->mm.aliasing_ppgtt)
		return;


	pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);

	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines */
	pd_offset <<= 16; /* PP_DIR_BASE takes the cacheline offset in bits 31:16 */

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
		/* GFX_MODE is per-ring on gen7+ */
	}

	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

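/*
 * On the gen5 + VT-d systems flagged by needs_idle_maps() the GPU must be
 * idled (uninterruptibly) around GTT teardown; do_idling() returns the old
 * interruptible state so undo_idling() can restore it afterwards.
 */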
static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
				      dev_priv->gtt.total / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	i915_gem_chipset_flush(dev);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The
 * object will be accessible to the GPU via commands whose operands
 * reference offsets within the global GTT, as well as accessible by the
 * GPU through the GMADR mapped BAR (dev_priv->gtt.mappable).
 */
static void gen6_ggtt_insert_entries(struct drm_device *dev,
				     struct sg_table *st,
				     unsigned int first_entry,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though the
	 * registers and PTEs are within the same BAR, they may be subject to
	 * different (NUMA-like) access patterns. Therefore, even assuming the
	 * hardware works the way we expect, we keep this posting read for
	 * paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1])
			!= gen6_pte_encode(dev, addr, level));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

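/*
 * Reset a range of global GTT entries to point at the scratch page,
 * clamping num_entries so we never write past the end of the table.
 */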
static void gen6_ggtt_clear_range(struct drm_device *dev,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
				      I915_CACHE_LLC);
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}


static void i915_ggtt_insert_entries(struct drm_device *dev,
				     struct sg_table *st,
				     unsigned int pg_start,
				     enum i915_cache_level cache_level)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(st, pg_start, flags);

}

static void i915_ggtt_clear_range(struct drm_device *dev,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	intel_gtt_clear_range(first_entry, num_entries);
}


void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
					 obj->gtt_space->start >> PAGE_SHIFT,
					 cache_level);

	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->gtt.gtt_clear_range(obj->base.dev,
				      obj->gtt_space->start >> PAGE_SHIFT,
				      obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

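/*
 * Objects with different cache levels ("colors") must not share a GPU
 * prefetch boundary, so the allocator keeps an extra page of separation
 * between neighbouring nodes of different color.
 */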
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}
void i915_gem_setup_global_gtt(struct drm_device *dev,
			       unsigned long start,
			       unsigned long mappable_end,
			       unsigned long end)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
			      obj->gtt_offset, obj->base.size);

		BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
						     obj->gtt_offset,
						     obj->base.size,
						     false);
		obj->has_global_gtt_mapping = 1;
	}

	dev_priv->gtt.start = start;
	dev_priv->gtt.total = end - start;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
			     hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
					      (hole_end-hole_start) / PAGE_SIZE);
	}

	/* And finally clear the reserved guard page */
	dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
}

static bool
intel_enable_ppgtt(struct drm_device *dev)
{
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.total;
	mappable_size = dev_priv->gtt.mappable_end;

	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
		int ret;
		/* PPGTT pdes are stolen from global gtt ptes, so shrink the
		 * aperture accordingly when using aliasing ppgtt. */
		gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;

		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);
		if (!ret)
			return;

		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
		drm_mm_takedown(&dev_priv->mm.gtt_space);
		gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
	}
	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

static int setup_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr))
		return -EINVAL;
#else
	dma_addr = page_to_phys(page);
#endif
	dev_priv->gtt.scratch_page = page;
	dev_priv->gtt.scratch_page_dma = dma_addr;

	return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	set_pages_wb(dev_priv->gtt.scratch_page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(dev_priv->gtt.scratch_page);
	__free_page(dev_priv->gtt.scratch_page);
}

static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
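	/* e.g. a GGMS field of 2 decodes to a 2 MB GTT: 512K four-byte PTEs,
	 * enough to map 512K * 4 KiB = 2 GiB of GPU address space. */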
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
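	/* e.g. a GMS field of 3 decodes to 3 << 25 = 96 MB of stolen memory. */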
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
{
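	/* e.g. an IVB GMS field of 9 selects stolen_decoder[9] = 256, i.e.
	 * 256 MB of stolen memory. */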
	static const int stolen_decoder[] = {
		0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
	snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
	return stolen_decoder[snb_gmch_ctl] << 20;
}

static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_bus_addr;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);

	if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
		*stolen = gen7_get_stolen_size(snb_gmch_ctl);
	else
		*stolen = gen6_get_stolen_size(snb_gmch_ctl);

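	/* gtt_size is the size of the PTE array in bytes; each four-byte PTE
	 * maps one 4 KiB page. */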
	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret)
		DRM_ERROR("Scratch setup failed\n");

	dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;

	return ret;
}

static void gen6_gmch_remove(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	iounmap(dev_priv->gtt.gsm);
	teardown_scratch_page(dev_priv->dev);
}

static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
	dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;

	return 0;
}

static void i915_gmch_remove(struct drm_device *dev)
{
	intel_gmch_remove();
}

int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	unsigned long gtt_size;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		dev_priv->gtt.gtt_probe = i915_gmch_probe;
		dev_priv->gtt.gtt_remove = i915_gmch_remove;
	} else {
		dev_priv->gtt.gtt_probe = gen6_gmch_probe;
		dev_priv->gtt.gtt_remove = gen6_gmch_remove;
	}

	ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
				     &dev_priv->gtt.stolen_size,
				     &gtt->mappable_base,
				     &gtt->mappable_end);
	if (ret)
		return ret;

	gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gen6_gtt_pte_t);

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 dev_priv->gtt.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
			 dev_priv->gtt.mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
			 dev_priv->gtt.stolen_size >> 20);

	return 0;
}