/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

typedef uint32_t gtt_pte_t;

/* PPGTT support for Sandybridge/Gen6 and later */
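/* Point every PTE in [first_entry, first_entry + num_entries) at the scratch
 * page, walking one page directory's worth of entries at a time. */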
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	gtt_pte_t *pt_vaddr;
	gtt_pte_t scratch_pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}

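/* Allocate one page of PTEs per page-directory entry for the aliasing PPGTT,
 * DMA-map those pages when the chipset requires it, and initialise every
 * entry to point at the scratch page. */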
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ret;

	ppgtt->dev = dev;
	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		goto err_ppgtt;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	if (dev_priv->mm.gtt->needs_dmar) {
		ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
						*ppgtt->num_pd_entries,
					     GFP_KERNEL);
		if (!ppgtt->pt_dma_addr)
			goto err_pt_alloc;

		for (i = 0; i < ppgtt->num_pd_entries; i++) {
			dma_addr_t pt_addr;

			pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
					       0, 4096,
					       PCI_DMA_BIDIRECTIONAL);

			if (pci_dma_mapping_error(dev->pdev,
						  pt_addr)) {
				ret = -EIO;
				goto err_pd_pin;

			}
			ppgtt->pt_dma_addr[i] = pt_addr;
		}
	}

	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;

	i915_ppgtt_clear_range(ppgtt, 0,
			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);

	dev_priv->mm.aliasing_ppgtt = ppgtt;

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);
err_ppgtt:
	kfree(ppgtt);

	return ret;
}

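/* Tear down the aliasing PPGTT: undo any DMA mappings and free the page
 * table pages allocated by i915_gem_init_aliasing_ppgtt(). */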
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	if (!ppgtt)
		return;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

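/* Write a PTE for every page of the scatter/gather list into the PPGTT,
 * starting at first_entry and crossing page-directory boundaries as needed. */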
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
					 const struct sg_table *pages,
					 unsigned first_entry,
					 gtt_pte_t pte_flags)
{
	gtt_pte_t *pt_vaddr, pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned i, j, m, segment_len;
	dma_addr_t page_addr;
	struct scatterlist *sg;

	/* init sg walking */
	sg = pages->sgl;
	i = 0;
	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
	m = 0;

	while (i < pages->nents) {
		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
			page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
			pt_vaddr[j] = pte | pte_flags;

			/* grab the next page */
			if (++m == segment_len) {
				if (++i == pages->nents)
					break;

				sg = sg_next(sg);
				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
				m = 0;
			}
		}

		kunmap_atomic(pt_vaddr);

		first_pte = 0;
		act_pd++;
	}
}

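/* Map an object into the aliasing PPGTT, translating the requested cache
 * level into PTE cacheability flags (Haswell encodes these differently). */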
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	gtt_pte_t pte_flags = GEN6_PTE_VALID;

	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		/* Haswell doesn't set L3 this way */
		if (IS_HASWELL(ppgtt->dev))
			pte_flags |= GEN6_PTE_CACHE_LLC;
		else
			pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte_flags |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		if (IS_HASWELL(ppgtt->dev))
			pte_flags |= HSW_PTE_UNCACHED;
		else
			pte_flags |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	i915_ppgtt_insert_sg_entries(ppgtt,
				     obj->pages,
				     obj->gtt_space->start >> PAGE_SHIFT,
				     pte_flags);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	i915_ppgtt_clear_range(ppgtt,
			       obj->gtt_space->start >> PAGE_SHIFT,
			       obj->base.size >> PAGE_SHIFT);
}

/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
					    enum i915_cache_level cache_level)
{
	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		/* Older chipsets do not have this extra level of CPU
	 * caching, so fall through and request the PTE simply
		 * as cached.
		 */
		if (INTEL_INFO(dev)->gen >= 6 && !IS_HASWELL(dev))
			return AGP_USER_CACHED_MEMORY_LLC_MLC;
	case I915_CACHE_LLC:
		return AGP_USER_CACHED_MEMORY;
	default:
	case I915_CACHE_NONE:
		return AGP_USER_MEMORY;
	}
}

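/* Some chipsets require the GPU to be idle before GTT DMA mappings are torn
 * down; do_idling()/undo_idling() perform that uninterruptible idle and
 * restore the previous interruptible state afterwards. */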
static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->mm.gtt->do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

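/* Rewrite the GTT entries for every object on the bound list, typically
 * after resume when the GTT contents may have been lost. */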
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	intel_gtt_chipset_flush();
}

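/* DMA-map an object's backing pages so the GTT/PPGTT can reference them;
 * objects that already carry a DMA mapping (e.g. imported buffers) are
 * left alone. */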
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);

	intel_gtt_insert_sg_entries(obj->pages,
				    obj->gtt_space->start >> PAGE_SHIFT,
				    agp_type);
	obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
			      obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

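/* Nodes of differing cache "color" must not be directly adjacent in the GTT:
 * shrink the hole by one page at either end when a neighbouring node has a
 * different color.  Only wired up on platforms without LLC (see below). */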
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}

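/* Set up the drm_mm range manager covering the part of the global GTT that
 * i915 manages, keep one guard page at the very end out of the allocator,
 * and scrub the whole range with scratch pages. */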
void i915_gem_init_global_gtt(struct drm_device *dev,
			      unsigned long start,
			      unsigned long mappable_end,
			      unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

	dev_priv->mm.gtt_start = start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
	dev_priv->mm.gtt_end = end;
	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;

	/* ... but ensure that we clear the entire range. */
	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}