i915_gem_gtt.c
/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))
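/*
 * Example: for a page-aligned 40-bit physical address such as 0x12_3456_7000,
 * the encode macro keeps bits 31:12 in place and folds address bits 39:32
 * (0x12 here) into PTE bits 11:4 (0x120), leaving bits 3:0 free for the
 * valid and cache-control flags defined below.
 */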

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
				      dma_addr_t addr,
				      enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_LLC_MLC:
		pte |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	return pte;
}

#define BYT_PTE_WRITEABLE		(1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)

static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
				     dma_addr_t addr,
				     enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	/* Mark the page as writeable.  Other platforms don't have a
	 * setting for read-only/writable, so this matches that behavior.
	 */
	pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_gtt_pte_t hsw_pte_encode(struct drm_device *dev,
				     dma_addr_t addr,
				     enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= GEN6_PTE_CACHE_LLC;

	return pte;
}

static int gen6_ppgtt_enable(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);

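	/* RING_PP_DIR_BASE takes the page directory offset expressed in
	 * 64-byte cachelines, packed into the upper 16 bits of the register.
	 */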
	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines, */
	pd_offset <<= 16;

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
					 ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		uint32_t ecochk, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

		ecochk = I915_READ(GAM_ECOCHK);
		if (IS_HASWELL(dev)) {
			ecochk |= ECOCHK_PPGTT_WB_HSW;
		} else {
			ecochk |= ECOCHK_PPGTT_LLC_IVB;
			ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
		}
		I915_WRITE(GAM_ECOCHK, ecochk);
		/* GFX_MODE is per-ring on gen7+ */
	}

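	/* Every ring is pointed at the same page directory: the aliasing
	 * PPGTT is a single address space shared by all rings.
	 */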
	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}
	return 0;
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = ppgtt->pte_encode(ppgtt->dev,
					ppgtt->scratch_page_dma_addr,
					I915_CACHE_LLC);

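	/* Walk one page table at a time: each iteration rewrites the PTEs
	 * that fall within the current table (up to I915_PPGTT_PT_ENTRIES of
	 * them) to point back at the scratch page, then moves to the next.
	 */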
	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
				      struct sg_table *pages,
				      unsigned first_entry,
				      enum i915_cache_level cache_level)
{
	gen6_gtt_pte_t *pt_vaddr;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

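	/* Write one PTE per backing page, switching to the next kmap'd page
	 * table whenever act_pte wraps past the end of the current one.
	 */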
	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		dma_addr_t page_addr;

		page_addr = sg_page_iter_dma_address(&sg_iter);
		pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr,
						      cache_level);
		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			act_pt++;
			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
			act_pte = 0;

		}
	}
	kunmap_atomic(pt_vaddr);
}

static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt =
		gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;

	if (IS_HASWELL(dev)) {
		ppgtt->pte_encode = hsw_pte_encode;
	} else if (IS_VALLEYVIEW(dev)) {
		ppgtt->pte_encode = byt_pte_encode;
	} else {
		ppgtt->pte_encode = gen6_pte_encode;
	}
	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->enable = gen6_ppgtt_enable;
	ppgtt->clear_range = gen6_ppgtt_clear_range;
	ppgtt->insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->cleanup = gen6_ppgtt_cleanup;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		return -ENOMEM;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr)
		goto err_pt_alloc;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			ret = -EIO;
			goto err_pd_pin;

		}
		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	ppgtt->clear_range(ppgtt, 0,
			   ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);

	return ret;
}

static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	ppgtt->dev = dev;
	ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;

	if (INTEL_INFO(dev)->gen < 8)
		ret = gen6_ppgtt_init(ppgtt);
	else
		BUG();

	if (ret)
		kfree(ppgtt);
	else
		dev_priv->mm.aliasing_ppgtt = ppgtt;

	return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	if (!ppgtt)
		return;

	ppgtt->cleanup(ppgtt);
	dev_priv->mm.aliasing_ppgtt = NULL;
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	ppgtt->insert_entries(ppgtt, obj->pages,
			      obj->gtt_space->start >> PAGE_SHIFT,
			      cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	ppgtt->clear_range(ppgtt,
			   obj->gtt_space->start >> PAGE_SHIFT,
			   obj->base.size >> PAGE_SHIFT);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
				      dev_priv->gtt.total / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	i915_gem_chipset_flush(dev);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct drm_device *dev,
				     struct sg_table *st,
				     unsigned int first_entry,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(dev_priv->gtt.pte_encode(dev, addr, level),
			  &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR, they may be subject to
	 * different (NUMA-like) access patterns. Therefore, even with the way
	 * we assume hardware should work, we must keep this posting read for
	 * paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1])
			!= dev_priv->gtt.pte_encode(dev, addr, level));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

static void gen6_ggtt_clear_range(struct drm_device *dev,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = dev_priv->gtt.pte_encode(dev,
					       dev_priv->gtt.scratch_page_dma,
					       I915_CACHE_LLC);
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}


static void i915_ggtt_insert_entries(struct drm_device *dev,
				     struct sg_table *st,
				     unsigned int pg_start,
				     enum i915_cache_level cache_level)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(st, pg_start, flags);

}

static void i915_ggtt_clear_range(struct drm_device *dev,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	intel_gtt_clear_range(first_entry, num_entries);
}


void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
					 obj->gtt_space->start >> PAGE_SHIFT,
					 cache_level);

	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->gtt.gtt_clear_range(obj->base.dev,
				      obj->gtt_space->start >> PAGE_SHIFT,
				      obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

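/* Keep a one-page guard between neighbouring GTT nodes whose cache "color"
 * (cache level) differs, by pulling the usable range in by 4096 bytes at
 * the mismatching edge.
 */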
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}
void i915_gem_setup_global_gtt(struct drm_device *dev,
			       unsigned long start,
			       unsigned long mappable_end,
			       unsigned long end)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
			      obj->gtt_offset, obj->base.size);

		BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
						     obj->gtt_offset,
						     obj->base.size,
						     false);
		obj->has_global_gtt_mapping = 1;
	}

	dev_priv->gtt.start = start;
	dev_priv->gtt.total = end - start;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
			     hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
					      (hole_end-hole_start) / PAGE_SIZE);
	}

	/* And finally clear the reserved guard page */
	dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
}

static bool
intel_enable_ppgtt(struct drm_device *dev)
{
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.total;
	mappable_size = dev_priv->gtt.mappable_end;

	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
		int ret;

		if (INTEL_INFO(dev)->gen <= 7) {
			/* PPGTT pdes are stolen from global gtt ptes, so shrink the
			 * aperture accordingly when using aliasing ppgtt. */
			gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
		}

		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);
		if (!ret)
			return;

		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
		drm_mm_takedown(&dev_priv->mm.gtt_space);
		gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
	}
	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

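/* The scratch page backs every GTT/PPGTT entry that is not bound to a real
 * object (see the *_clear_range() callbacks above), so stray accesses hit a
 * known, uncached page instead of arbitrary memory.
 */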
static int setup_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr))
		return -EINVAL;
#else
	dma_addr = page_to_phys(page);
#endif
	dev_priv->gtt.scratch_page = page;
	dev_priv->gtt.scratch_page_dma = dma_addr;

	return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	set_pages_wb(dev_priv->gtt.scratch_page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(dev_priv->gtt.scratch_page);
	__free_page(dev_priv->gtt.scratch_page);
}

static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
{
	static const int stolen_decoder[] = {
		0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
	snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
	return stolen_decoder[snb_gmch_ctl] << 20;
}

static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_bus_addr;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);

	if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
		*stolen = gen7_get_stolen_size(snb_gmch_ctl);
	else
		*stolen = gen6_get_stolen_size(snb_gmch_ctl);

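	/* gen6_get_total_gtt_size() returns the size of the PTE array in
	 * bytes; each PTE maps one page, so the usable GTT address space is
	 * the number of PTEs shifted up by PAGE_SHIFT.
	 */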
	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret)
		DRM_ERROR("Scratch setup failed\n");

	dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;

	return ret;
}

static void gen6_gmch_remove(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	iounmap(dev_priv->gtt.gsm);
	teardown_scratch_page(dev_priv->dev);
}

static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
	dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;

	return 0;
}

static void i915_gmch_remove(struct drm_device *dev)
{
	intel_gmch_remove();
}

int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	int ret;

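	/* Gen5 and earlier go through the intel-gtt/GMCH layer
	 * (i915_gmch_probe); gen6+ probe the hardware directly and map the
	 * PTE range themselves (gen6_gmch_probe).
	 */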
	if (INTEL_INFO(dev)->gen <= 5) {
		dev_priv->gtt.gtt_probe = i915_gmch_probe;
		dev_priv->gtt.gtt_remove = i915_gmch_remove;
	} else {
		dev_priv->gtt.gtt_probe = gen6_gmch_probe;
		dev_priv->gtt.gtt_remove = gen6_gmch_remove;
		if (IS_HASWELL(dev)) {
			dev_priv->gtt.pte_encode = hsw_pte_encode;
		} else if (IS_VALLEYVIEW(dev)) {
			dev_priv->gtt.pte_encode = byt_pte_encode;
		} else {
			dev_priv->gtt.pte_encode = gen6_pte_encode;
		}
	}

	ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
				     &dev_priv->gtt.stolen_size,
				     &gtt->mappable_base,
				     &gtt->mappable_end);
	if (ret)
		return ret;

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 dev_priv->gtt.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
			 dev_priv->gtt.mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
			 dev_priv->gtt.stolen_size >> 20);

	return 0;
}