/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

typedef uint32_t gen6_gtt_pte_t;

/* PPGTT stuff */
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

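/* Construct a gen6+ GTT/PPGTT page table entry: pack the page's DMA
 * address with GEN6_PTE_ADDR_ENCODE, set the valid bit, and pick the
 * cacheability bits matching the requested cache level.
 */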
static inline gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
					     dma_addr_t addr,
					     enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_LLC_MLC:
		/* Haswell doesn't set L3 this way */
		if (IS_HASWELL(dev))
			pte |= GEN6_PTE_CACHE_LLC;
		else
			pte |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		if (IS_HASWELL(dev))
			pte |= HSW_PTE_UNCACHED;
		else
			pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	return pte;
}

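/* Enable the aliasing PPGTT: write every page directory entry into the
 * slots reserved for it in the global GTT, set the ECO/ECOCHK bits the
 * hardware needs, and point each ring at the page directory through
 * PP_DIR_DCLV/PP_DIR_BASE.
 */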
static int gen6_ppgtt_enable(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);

	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines, */
	pd_offset <<= 16;

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
					 ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		uint32_t ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

		I915_WRITE(GAM_ECOCHK, ECOCHK_PPGTT_CACHE64B);
		/* GFX_MODE is per-ring on gen7+ */
	}

	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}
	return 0;
}

/* PPGTT support for Sandybridge/Gen6 and later */
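/* Unmap a range of PPGTT entries by rewriting them to point at the
 * scratch page, walking one page table at a time.
 */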
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = gen6_pte_encode(ppgtt->dev,
				      ppgtt->scratch_page_dma_addr,
				      I915_CACHE_LLC);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}

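/* Write a PTE for every page in the object's sg list, hopping to the
 * next page table whenever the current one fills up.
 */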
static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
				      struct sg_table *pages,
				      unsigned first_entry,
				      enum i915_cache_level cache_level)
{
	gen6_gtt_pte_t *pt_vaddr;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		dma_addr_t page_addr;

		page_addr = sg_page_iter_dma_address(&sg_iter);
		pt_vaddr[act_pte] = gen6_pte_encode(ppgtt->dev, page_addr,
						    cache_level);
		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			act_pt++;
			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
			act_pte = 0;
		}
	}
	kunmap_atomic(pt_vaddr);
}

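/* Tear the PPGTT down again: unmap and free every page table page along
 * with the bookkeeping arrays.
 */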
static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

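/* Allocate one page table page per page directory entry, dma-map them
 * all, and start with the entire address space pointing at scratch.
 */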
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt =
		gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;

	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->enable = gen6_ppgtt_enable;
	ppgtt->clear_range = gen6_ppgtt_clear_range;
	ppgtt->insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->cleanup = gen6_ppgtt_cleanup;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		return -ENOMEM;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr)
		goto err_pt_alloc;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			ret = -EIO;
			goto err_pd_pin;
		}
		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	ppgtt->clear_range(ppgtt, 0,
			   ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);
287 288 289 290 291 292 293 294 295 296 297 298 299 300 301

	return ret;
}

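/* Allocate the single aliasing PPGTT and initialize it with the
 * generation-specific vfuncs; gen8+ is deliberately not handled yet.
 */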
static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	ppgtt->dev = dev;
	ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;

	if (INTEL_INFO(dev)->gen < 8)
		ret = gen6_ppgtt_init(ppgtt);
	else
		BUG();

	if (ret)
		kfree(ppgtt);
	else
		dev_priv->mm.aliasing_ppgtt = ppgtt;

	return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	if (!ppgtt)
		return;

	ppgtt->cleanup(ppgtt);
	dev_priv->mm.aliasing_ppgtt = NULL;
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	ppgtt->insert_entries(ppgtt, obj->pages,
			      obj->gtt_space->start >> PAGE_SHIFT,
			      cache_level);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	ppgtt->clear_range(ppgtt,
			   obj->gtt_space->start >> PAGE_SHIFT,
			   obj->base.size >> PAGE_SHIFT);
}

extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}

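/* When the VT-d idle-maps workaround is needed, force the GPU idle
 * (non-interruptibly) before touching the GTT; the old interruptible
 * state is returned so undo_idling() can restore it.
 */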
static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

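/* Called on resume: GTT contents are not preserved across suspend, so
 * scrub our range to scratch pages and then rebind every bound object.
 */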
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
				      dev_priv->gtt.total / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	i915_gem_chipset_flush(dev);
}

int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct drm_device *dev,
				     struct sg_table *st,
				     unsigned int first_entry,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(gen6_pte_encode(dev, addr, level), &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1])
			!= gen6_pte_encode(dev, addr, level));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

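/* Point a range of global GTT entries at the scratch page so that any
 * stray GPU access lands somewhere harmless.
 */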
static void gen6_ggtt_clear_range(struct drm_device *dev,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
				      I915_CACHE_LLC);
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}


static void i915_ggtt_insert_entries(struct drm_device *dev,
				     struct sg_table *st,
				     unsigned int pg_start,
				     enum i915_cache_level cache_level)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(st, pg_start, flags);
}

static void i915_ggtt_clear_range(struct drm_device *dev,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	intel_gtt_clear_range(first_entry, num_entries);
}


void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
					 obj->gtt_space->start >> PAGE_SHIFT,
					 cache_level);

	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->gtt.gtt_clear_range(obj->base.dev,
				      obj->gtt_space->start >> PAGE_SHIFT,
				      obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

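/* drm_mm color callback: on non-LLC platforms, neighbouring GTT nodes
 * with different cache levels ("colors") must not touch, so keep a page
 * of slack between them.
 */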
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}
void i915_gem_setup_global_gtt(struct drm_device *dev,
			       unsigned long start,
			       unsigned long mappable_end,
			       unsigned long end)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
			      obj->gtt_offset, obj->base.size);

		BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
						     obj->gtt_offset,
						     obj->base.size,
						     false);
		obj->has_global_gtt_mapping = 1;
	}

	dev_priv->gtt.start = start;
	dev_priv->gtt.total = end - start;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
			     hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
					      (hole_end-hole_start) / PAGE_SIZE);
	}

	/* And finally clear the reserved guard page */
	dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
}

static bool
intel_enable_ppgtt(struct drm_device *dev)
{
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}

void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.total;
	mappable_size = dev_priv->gtt.mappable_end;

	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
		int ret;

		if (INTEL_INFO(dev)->gen <= 7) {
			/* PPGTT pdes are stolen from global gtt ptes, so shrink the
			 * aperture accordingly when using aliasing ppgtt. */
			gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
		}

		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);
		if (!ret)
			return;

		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
		drm_mm_takedown(&dev_priv->mm.gtt_space);
		gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
	}
	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}

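/* Allocate the scratch page that unused GTT and PPGTT entries point at,
 * mark it uncached, and dma-map it in case an IOMMU is in use.
 */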
static int setup_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr))
		return -EINVAL;
#else
	dma_addr = page_to_phys(page);
#endif
	dev_priv->gtt.scratch_page = page;
	dev_priv->gtt.scratch_page_dma = dma_addr;

	return 0;
}

static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	set_pages_wb(dev_priv->gtt.scratch_page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(dev_priv->gtt.scratch_page);
	__free_page(dev_priv->gtt.scratch_page);
}

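/* Decode the GGMS field of the GMCH control word: the size of the GTT
 * page table itself, in 1MB units.
 */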
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
{
	static const int stolen_decoder[] = {
		0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
	snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
	return stolen_decoder[snb_gmch_ctl] << 20;
}

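/* Probe the gen6+ GTT: GMADR (BAR 2) provides the mappable aperture, the
 * GMCH control word gives the GTT and stolen sizes, and the PTEs live in
 * the upper half of BAR 0, which gets ioremapped here.
 */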
static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_bus_addr;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);

	if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
		*stolen = gen7_get_stolen_size(snb_gmch_ctl);
	else
		*stolen = gen6_get_stolen_size(snb_gmch_ctl);

	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret)
		DRM_ERROR("Scratch setup failed\n");

	dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;

	return ret;
}

static void gen6_gmch_remove(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	iounmap(dev_priv->gtt.gsm);
	teardown_scratch_page(dev_priv->dev);
}

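/* Pre-gen6 path: defer to the intel-gtt driver for probing and read back
 * the sizes it discovered via intel_gtt_get().
 */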
static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
	dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;

	return 0;
}

static void i915_gmch_remove(struct drm_device *dev)
{
	intel_gmch_remove();
}

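/* Top-level GTT init: choose the probe/remove vfuncs by generation, run
 * the probe and report what was found.
 */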
int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	unsigned long gtt_size;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		dev_priv->gtt.gtt_probe = i915_gmch_probe;
		dev_priv->gtt.gtt_remove = i915_gmch_remove;
	} else {
		dev_priv->gtt.gtt_probe = gen6_gmch_probe;
		dev_priv->gtt.gtt_remove = gen6_gmch_remove;
	}

	ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
				     &dev_priv->gtt.stolen_size,
				     &gtt->mappable_base,
				     &gtt->mappable_end);
	if (ret)
		return ret;

	gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gen6_gtt_pte_t);

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 dev_priv->gtt.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
			 dev_priv->gtt.mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
			 dev_priv->gtt.stolen_size >> 20);

	return 0;
}