/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, and so the user finds that their system has less memory
 * available than they put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

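/*
 * i915_gem_stolen_insert_node - carve a range out of the stolen area
 *
 * Allocates a @size byte range with the requested @alignment from the
 * stolen drm_mm range manager, serialised by mm.stolen_lock. Returns
 * -ENODEV if stolen memory has not been initialised.
 */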
int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node(&dev_priv->mm.stolen, node, size, alignment,
				 DRM_MM_SEARCH_DEFAULT);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

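/*
 * i915_gem_stolen_remove_node - give a range back to the stolen allocator
 *
 * Counterpart to i915_gem_stolen_insert_node(); removes @node from the
 * stolen drm_mm under mm.stolen_lock.
 */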
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}

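/*
 * i915_stolen_to_physical - locate the physical base of stolen memory
 *
 * Returns the physical address at which the BIOS placed the stolen region,
 * or 0 if it could not be determined or the region is unusable.
 */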
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct resource *r;
	u32 base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
	 * machines this is also mirrored in the bridge device at different
	 * locations, or in the MCHBAR. On gen2, the layout is again slightly
	 * different with the Graphics Segment immediately following Top of
	 * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
	 * reported by 865g, so we just use the top of memory as determined
	 * by the e820 probe.
	 *
	 * XXX However, gen2 requires a symbol (max_low_pfn_mapped) that is
	 * unavailable here, so the gen2 path below is compiled out.
	 */
	base = 0;
	if (INTEL_INFO(dev)->gen >= 3) {
		/* Read Graphics Base of Stolen Memory directly */
		pci_read_config_dword(dev->pdev, 0x5c, &base);
		base &= ~((1<<20) - 1);
	} else { /* GEN2 */
#if 0
		/* Stolen is immediately above Top of Memory */
		base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
	}

	if (base == 0)
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
		struct {
			u32 start, end;
		} stolen[2] = {
			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
		};
		u64 gtt_start, gtt_end;

		gtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev))
			gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
				(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			gtt_start &= PGTBL_ADDRESS_LO_MASK;
		gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;

		if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
			stolen[0].end = gtt_start;
		if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
			stolen[1].start = gtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
		}

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long) gtt_start,
				      (unsigned long long) gtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
				      base, base + (u32) dev_priv->gtt.stolen_size - 1);
		}
	}


	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 */
		r = devm_request_mem_region(dev->dev, base + 1,
					    dev_priv->gtt.stolen_size - 1,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev)) {
			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
				  base, base + (uint32_t)dev_priv->gtt.stolen_size);
			base = 0;
		}
	}

	return base;
}

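/*
 * i915_gem_cleanup_stolen - tear down the stolen drm_mm range manager
 */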
void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}

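/*
 * i915_gem_init_stolen - set up the stolen memory allocator
 *
 * Locates the stolen region, trims off the portion reserved for the BIOS
 * and initialises a drm_mm range manager over the remainder. Always
 * returns 0; failure to find usable stolen memory is not fatal.
 */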
int i915_gem_init_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp;
	int bios_reserved = 0;

	mutex_init(&dev_priv->mm.stolen_lock);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (dev_priv->gtt.stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
		      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);

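	/* The BIOS keeps a slice of stolen memory for its own use; gen7 and
	 * gen8+ report its size in GEN7_BIOS_RESERVED, and that slice is
	 * kept out of our allocator below.
	 */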
	if (INTEL_INFO(dev)->gen >= 8) {
		tmp = I915_READ(GEN7_BIOS_RESERVED);
		tmp >>= GEN8_BIOS_RESERVED_SHIFT;
		tmp &= GEN8_BIOS_RESERVED_MASK;
		bios_reserved = (1024*1024) << tmp;
	} else if (IS_GEN7(dev)) {
		tmp = I915_READ(GEN7_BIOS_RESERVED);
		bios_reserved = tmp & GEN7_BIOS_RESERVED_256K ?
			256*1024 : 1024*1024;
	}

	if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
		return 0;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
		    bios_reserved);

	return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct sg_table *st;
	struct scatterlist *sg;

	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
	BUG_ON(offset > dev_priv->gtt.stolen_size - size);

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return NULL;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}

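/* Stolen objects are created with their backing store already populated
 * (see _i915_gem_object_create_stolen), so the normal get_pages path must
 * never be called.
 */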
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	BUG();
	return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
	/* Should only be called during free */
	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

	if (obj->stolen) {
		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
		kfree(obj->stolen);
		obj->stolen = NULL;
	}
}
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

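/*
 * _i915_gem_object_create_stolen - wrap a stolen drm_mm node in a GEM object
 *
 * On success the object takes ownership of @stolen; on failure the caller
 * must remove and free the node itself.
 */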
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->pages = i915_pages_create_for_stolen(dev,
						  stolen->start, stolen->size);
	if (obj->pages == NULL)
		goto cleanup;

	i915_gem_object_pin_pages(obj);
	obj->stolen = stolen;

	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

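/*
 * i915_gem_object_create_stolen - allocate a new object backed by stolen
 *
 * Carves @size bytes (4KiB aligned) out of the stolen area and wraps the
 * range in a GEM object. Returns NULL if stolen memory is unavailable or
 * the allocation fails.
 */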
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}

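/*
 * i915_gem_object_create_stolen_for_preallocated - adopt a firmware allocation
 *
 * Wraps a range of stolen memory that the BIOS has already populated (such
 * as the boot framebuffer) in a GEM object, optionally reserving its
 * existing offset in the global GTT as well.
 */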
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt = &dev_priv->gtt.base;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
			stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
	    WARN_ON(stolen_offset & 4095))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_out;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;
	if (drm_mm_initialized(&ggtt->mm)) {
		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
			goto err_vma;
		}
	}

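	/* No PTE writes are needed here: the preallocated range is expected
	 * to be mapped in the GGTT already, so just mark the vma as bound
	 * and track the object on the bound/inactive lists.
	 */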
	vma->bound |= GLOBAL_BIND;

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&vma->mm_list, &ggtt->inactive_list);
	i915_gem_object_pin_pages(obj);

	return obj;

err_vma:
	i915_gem_vma_destroy(vma);
err_out:
	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	drm_gem_object_unreference(&obj->base);
	return NULL;
}