/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
					    enum i915_cache_level cache_level)
{
	switch (cache_level) {
	case I915_CACHE_LLC_MLC:
		if (INTEL_INFO(dev)->gen >= 6)
			return AGP_USER_CACHED_MEMORY_LLC_MLC;
		/* Older chipsets do not have this extra level of CPU
		 * caching, so fall through and request the PTE simply
		 * as cached.
		 */
	case I915_CACHE_LLC:
		return AGP_USER_CACHED_MEMORY;
	default:
	case I915_CACHE_NONE:
		return AGP_USER_MEMORY;
	}
}

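/* Some platforms (mm.gtt->do_idle_maps) require the GPU to be idle before
 * GTT mappings are torn down.  Disable interruptible waits, idle the GPU,
 * and return the previous interruptible state so the caller can restore it
 * with undo_idling().
 */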
static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev, false)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->mm.gtt->do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}

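/* Rewrite the GTT after its contents have been lost (e.g. across
 * suspend/resume): fill our range with scratch pages, then flush and
 * rebind every object on the GTT list and flush the chipset write buffers.
 */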
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
			      (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_rebind_object(obj, obj->cache_level);
	}

	intel_gtt_chipset_flush();
}

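/* Insert the object's backing pages into the GTT at its allocated offset.
 * When the hardware requires DMA remapping (needs_dmar), map the pages
 * into a scatterlist first and insert the sg entries instead.
 */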
int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
	int ret;

	if (dev_priv->mm.gtt->needs_dmar) {
		ret = intel_gtt_map_memory(obj->pages,
					   obj->base.size >> PAGE_SHIFT,
					   &obj->sg_list,
					   &obj->num_sg);
		if (ret != 0)
			return ret;

		intel_gtt_insert_sg_entries(obj->sg_list,
					    obj->num_sg,
					    obj->gtt_space->start >> PAGE_SHIFT,
					    agp_type);
	} else
		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
				       obj->base.size >> PAGE_SHIFT,
				       obj->pages,
				       agp_type);

	return 0;
}

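/* Rewrite an already-bound object's GTT entries with the PTE type for the
 * requested cache level; on DMAR systems the object must already have a
 * valid scatterlist.
 */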
void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
				enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);

	if (dev_priv->mm.gtt->needs_dmar) {
		BUG_ON(!obj->sg_list);

		intel_gtt_insert_sg_entries(obj->sg_list,
					    obj->num_sg,
					    obj->gtt_space->start >> PAGE_SHIFT,
					    agp_type);
	} else
		intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
				       obj->base.size >> PAGE_SHIFT,
				       obj->pages,
				       agp_type);
}

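/* Clear the object's range of the GTT and release any DMA scatterlist
 * mapping, idling the GPU first on platforms that require it.
 */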
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
			      obj->base.size >> PAGE_SHIFT);

	if (obj->sg_list) {
		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
		obj->sg_list = NULL;
	}

	undo_idling(dev_priv, interruptible);
}