/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* PPGTT support for Sandybridge/Gen6 and later */
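/* Fill the given PTE range with the scratch page entry. The range may span
 * several page tables, so the loop maps one page table (one page directory
 * entry) at a time. */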
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	uint32_t *pt_vaddr;
	uint32_t scratch_pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
	scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pd++;
	}
}

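/* Set up the aliasing PPGTT: allocate one page per page directory entry for
 * the page tables, map them for DMA where the chipset needs it, point every
 * PTE at the scratch page, and record where in the global GTT the PDEs will
 * live (stolen from the end, see below). */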
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ret;

	ppgtt->dev = dev;
	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		goto err_ppgtt;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	if (dev_priv->mm.gtt->needs_dmar) {
		ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
						*ppgtt->num_pd_entries,
					     GFP_KERNEL);
		if (!ppgtt->pt_dma_addr)
			goto err_pt_alloc;

		for (i = 0; i < ppgtt->num_pd_entries; i++) {
			dma_addr_t pt_addr;

			pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
					       0, 4096,
					       PCI_DMA_BIDIRECTIONAL);

			if (pci_dma_mapping_error(dev->pdev,
						  pt_addr)) {
				ret = -EIO;
				goto err_pd_pin;
			}
			ppgtt->pt_dma_addr[i] = pt_addr;
		}
	}

	ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;

	i915_ppgtt_clear_range(ppgtt, 0,
			       ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

	ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(uint32_t);

	dev_priv->mm.aliasing_ppgtt = ppgtt;

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);
err_ppgtt:
	kfree(ppgtt);

	return ret;
}

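/* Undo i915_gem_init_aliasing_ppgtt: release the DMA mappings (if any), the
 * page table pages and the bookkeeping arrays. */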
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	int i;

	if (!ppgtt)
		return;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}

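/* Write one PTE per 4KiB page of the scatterlist into the ppgtt, starting at
 * first_entry. An sg segment may cover several pages (m counts pages within
 * the current segment) and the destination may span several page tables. */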
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
					 const struct sg_table *pages,
					 unsigned first_entry,
					 uint32_t pte_flags)
{
	uint32_t *pt_vaddr, pte;
	unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned i, j, m, segment_len;
	dma_addr_t page_addr;
	struct scatterlist *sg;

	/* init sg walking */
	sg = pages->sgl;
	i = 0;
	segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
	m = 0;

	while (i < pages->nents) {
		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

		for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
			page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
			pte = GEN6_PTE_ADDR_ENCODE(page_addr);
			pt_vaddr[j] = pte | pte_flags;

			/* grab the next page */
			if (++m == segment_len) {
				if (++i == pages->nents)
					break;

				sg = sg_next(sg);
				segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
				m = 0;
			}
		}

		kunmap_atomic(pt_vaddr);

		first_pte = 0;
		act_pd++;
	}
}

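/* Map an object into the aliasing PPGTT at its current GTT offset, encoding
 * the requested cache level in the PTE flags. Haswell encodes cacheability
 * differently from other gen6+ parts, hence the IS_HASWELL special cases. */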
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	uint32_t pte_flags = GEN6_PTE_VALID;

	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		/* Haswell doesn't set L3 this way */
		if (IS_HASWELL(ppgtt->dev))
			pte_flags |= GEN6_PTE_CACHE_LLC;
		else
			pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte_flags |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		if (IS_HASWELL(ppgtt->dev))
			pte_flags |= HSW_PTE_UNCACHED;
		else
			pte_flags |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	i915_ppgtt_insert_sg_entries(ppgtt,
				     obj->pages,
				     obj->gtt_space->start >> PAGE_SHIFT,
				     pte_flags);
}

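/* Drop an object's PPGTT mapping by pointing its PTEs back at the scratch
 * page. */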
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	i915_ppgtt_clear_range(ppgtt,
			       obj->gtt_space->start >> PAGE_SHIFT,
			       obj->base.size >> PAGE_SHIFT);
}

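/* Translate an i915 cache level into the AGP memory type the intel-gtt layer
 * uses for global GTT entries. LLC+MLC is only requested on gen6+ parts other
 * than Haswell; everything else ends up plain cached or uncached. */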
/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
					    enum i915_cache_level cache_level)
{
	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		/* Older chipsets do not have this extra level of CPU
		 * caching, so fall through and request the PTE simply
		 * as cached.
		 */
		if (INTEL_INFO(dev)->gen >= 6 && !IS_HASWELL(dev))
			return AGP_USER_CACHED_MEMORY_LLC_MLC;
	case I915_CACHE_LLC:
		return AGP_USER_CACHED_MEMORY;
	default:
	case I915_CACHE_NONE:
		return AGP_USER_MEMORY;
	}
}

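/* Platforms with gtt->do_idle_maps set need the GPU idle while DMA mappings
 * are torn down: do_idling() forces a non-interruptible GPU idle and
 * undo_idling() restores the previous interruptible state afterwards. */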
static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->mm.gtt->do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

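/* Rewrite the global GTT for every bound object, e.g. after its contents have
 * been lost across suspend/resume: first scrub our whole range with scratch
 * pages, then clflush and rebind each object. */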
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	intel_gtt_chipset_flush();
}

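/* Create the DMA mapping for an object's backing pages, unless someone else
 * has already provided one (obj->has_dma_mapping). */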
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}

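/* Insert an object's pages into the global GTT at its allocated offset, with
 * the PTE cacheability derived from cache_level. */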
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);

	intel_gtt_insert_sg_entries(obj->pages,
				    obj->gtt_space->start >> PAGE_SHIFT,
				    agp_type);
	obj->has_global_gtt_mapping = 1;
}

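/* Point an object's range of global GTT entries back at scratch pages. */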
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
			      obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}

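/* Tear down the DMA mapping created by i915_gem_gtt_prepare_object, idling
 * the GPU around the unmap on platforms that require it. */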
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}

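/* drm_mm color_adjust callback: keep a page of guard space between
 * neighbouring GTT nodes whose cache coloring differs. Only installed on
 * machines without LLC (see i915_gem_init_global_gtt). */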
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}

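/* Set up the drm_mm range manager for the global GTT: manage everything
 * between start and end minus a trailing guard page, enable cache coloring
 * on non-LLC machines, record the limits in dev_priv and scrub the whole
 * range with scratch pages. */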
void i915_gem_init_global_gtt(struct drm_device *dev,
			      unsigned long start,
			      unsigned long mappable_end,
			      unsigned long end)
{
	drm_i915_private_t *dev_priv = dev->dev_private;

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

	dev_priv->mm.gtt_start = start;
	dev_priv->mm.gtt_mappable_end = mappable_end;
	dev_priv->mm.gtt_end = end;
	dev_priv->mm.gtt_total = end - start;
	dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;

	/* ... but ensure that we clear the entire range. */
	intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}