/* i915_gem_stolen.c: management of the BIOS-stolen graphics memory region */
/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

29 30
#include <drm/drmP.h>
#include <drm/i915_drm.h>
31 32 33 34 35 36 37 38 39 40 41 42 43 44
#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that his system has less memory
 * available than he put in. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is try to reuse that object for our own fbcon which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

45
/*
 * i915_stolen_to_physical - locate the physical base of the stolen region
 * @dev: drm device
 *
 * Reads the Graphics Base of Stolen Memory (GBSM) from the igfx PCI
 * configuration space (gen3+) and reserves the region against the rest of
 * the kernel. Returns the physical base address, or 0 when the base cannot
 * be determined (gen2) or the region is unexpectedly already claimed.
 */
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct resource *r;
	u32 base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
	 * machines this is also mirrored in the bridge device at different
	 * locations, or in the MCHBAR. On gen2, the layout is again slightly
	 * different with the Graphics Segment immediately following Top of
	 * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
	 * reported by 865g, so we just use the top of memory as determined
	 * by the e820 probe.
	 *
	 * XXX However gen2 requires an unavailable symbol.
	 */
	base = 0;
	if (INTEL_INFO(dev)->gen >= 3) {
		/* Read Graphics Base of Stolen Memory directly */
		pci_read_config_dword(dev->pdev, 0x5c, &base);
		/* The base is 1MiB-aligned; the low bits of the dword are
		 * flag/enable bits, so mask them off.
		 */
		base &= ~((1<<20) - 1);
	} else { /* GEN2 */
#if 0
		/* Stolen is immediately above Top of Memory */
		base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
	}

	if (base == 0)
		return 0;

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 */
		r = devm_request_mem_region(dev->dev, base + 1,
					    dev_priv->gtt.stolen_size - 1,
					    "Graphics Stolen Memory");
		if (r == NULL) {
			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
				  base, base + (uint32_t)dev_priv->gtt.stolen_size);
			base = 0;
		}
	}

	return base;
}

106
/*
 * i915_setup_compression - carve FBC buffers out of stolen memory
 * @dev: drm device
 * @size: requested size of the compressed framebuffer, in bytes
 *
 * Allocates the compressed framebuffer from stolen space and programs the
 * hardware base register. On hardware that is neither PCH-split nor GM45,
 * a separate 4KiB compressed line buffer is also required and allocated.
 *
 * Note that @size is modified in place by the shift-assignments below, so
 * dev_priv->fbc.size records the size actually reserved (which may be twice
 * the request on success of the first, over-allocating attempt).
 *
 * Returns 0 on success, -ENOSPC when stolen space is exhausted.
 */
static int i915_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
	int ret;

	compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
	if (!compressed_fb)
		goto err_llb;

	/* Try to over-allocate to reduce reallocations and fragmentation */
	ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
				 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
					 size >>= 1, 4096,
					 DRM_MM_SEARCH_DEFAULT);
	if (ret)
		goto err_llb;

	if (HAS_PCH_SPLIT(dev))
		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
	else if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
	} else {
		/* Older hardware additionally needs a compressed line buffer */
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
					 4096, 4096, DRM_MM_SEARCH_DEFAULT);
		if (ret)
			goto err_fb;

		dev_priv->fbc.compressed_llb = compressed_llb;

		/* These registers take physical addresses, hence the
		 * stolen_base offset.
		 */
		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->mm.stolen_base + compressed_fb->start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->mm.stolen_base + compressed_llb->start);
	}

	dev_priv->fbc.compressed_fb = compressed_fb;
	dev_priv->fbc.size = size;

	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
		      size);

	return 0;

err_fb:
	/* compressed_llb is either NULL (kfree is a no-op) or allocated but
	 * not yet inserted, so a plain kfree is correct here.
	 */
	kfree(compressed_llb);
	drm_mm_remove_node(compressed_fb);
err_llb:
	kfree(compressed_fb);
	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}

int i915_gem_stolen_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

169
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
170 171
		return -ENODEV;

172
	if (size < dev_priv->fbc.size)
173 174 175 176 177 178
		return 0;

	/* Release any current block */
	i915_gem_stolen_cleanup_compression(dev);

	return i915_setup_compression(dev, size);
179 180
}

181
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
182 183 184
{
	struct drm_i915_private *dev_priv = dev->dev_private;

185
	if (dev_priv->fbc.size == 0)
186 187
		return;

188 189 190 191
	if (dev_priv->fbc.compressed_fb) {
		drm_mm_remove_node(dev_priv->fbc.compressed_fb);
		kfree(dev_priv->fbc.compressed_fb);
	}
192

193 194 195 196
	if (dev_priv->fbc.compressed_llb) {
		drm_mm_remove_node(dev_priv->fbc.compressed_llb);
		kfree(dev_priv->fbc.compressed_llb);
	}
197

198
	dev_priv->fbc.size = 0;
199 200 201 202
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
203 204
	struct drm_i915_private *dev_priv = dev->dev_private;

205 206 207
	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

208
	i915_gem_stolen_cleanup_compression(dev);
209
	drm_mm_takedown(&dev_priv->mm.stolen);
210 211 212 213 214
}

/*
 * i915_gem_init_stolen - set up the allocator over BIOS-stolen memory
 * @dev: drm device
 *
 * Locates the stolen region and initializes a drm_mm range manager over it
 * (minus any BIOS-reserved tail). Always returns 0: unusable stolen memory
 * is not fatal, the allocator is simply left uninitialized and later
 * callers bail out via drm_mm_initialized().
 */
int i915_gem_init_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int bios_reserved = 0;

#ifdef CONFIG_INTEL_IOMMU
	/* Skip stolen memory entirely when DMA remapping is active on
	 * gen < 8 hardware.
	 */
	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (dev_priv->gtt.stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
		      dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);

	if (IS_VALLEYVIEW(dev))
		bios_reserved = 1024*1024; /* top 1M on VLV/BYT */

	if (WARN_ON(bios_reserved > dev_priv->gtt.stolen_size))
		return 0;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size -
		    bios_reserved);

	return 0;
}
246 247 248 249 250 251 252 253 254 255

/*
 * i915_pages_create_for_stolen - build a fake sg table for a stolen range
 * @dev: drm device
 * @offset: byte offset of the range within stolen memory
 * @size: size of the range in bytes
 *
 * Stolen memory has no struct page backing, so we fabricate a single-entry
 * scatterlist whose dma address points at the contiguous physical range.
 * Returns the sg table, or NULL on allocation failure.
 */
static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct sg_table *st;
	struct scatterlist *sg;

	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);

	/* The range must lie entirely within stolen memory. */
	BUG_ON(offset > dev_priv->gtt.stolen_size - size);

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return NULL;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}

	/* One contiguous entry covering the whole allocation. */
	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;
	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}

static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	/* Stolen objects are created with their backing store already in
	 * place (i915_pages_create_for_stolen()), so this hook must never
	 * be reached.
	 */
	BUG();
	return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
	/* Should only be called during free */
	/* Tear down the fake single-entry sg table; there are no real pages
	 * to release.
	 */
	sg_free_table(obj->pages);
	kfree(obj->pages);
}

/* Ops for objects backed by stolen memory: pages exist from creation, so
 * get_pages is a BUG trap and put_pages only frees the fake sg table.
 */
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
};

/*
 * _i915_gem_object_create_stolen - wrap a stolen drm_mm node in a GEM object
 * @dev: drm device
 * @stolen: already-reserved node in the stolen allocator
 *
 * On success the returned object takes ownership of @stolen (stored in
 * obj->stolen, released via i915_gem_object_release_stolen()) and has its
 * pages pinned. On failure returns NULL and ownership of @stolen stays with
 * the caller.
 */
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->pages = i915_pages_create_for_stolen(dev,
						  stolen->start, stolen->size);
	if (obj->pages == NULL)
		goto cleanup;

	/* The fake sg table carries the dma address already. */
	obj->has_dma_mapping = true;
	i915_gem_object_pin_pages(obj);
	obj->stolen = stolen;

	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

/*
 * i915_gem_object_create_stolen - allocate a GEM object from stolen memory
 * @dev: drm device
 * @size: requested size in bytes
 *
 * Reserves @size bytes (4KiB-aligned) from the stolen allocator and wraps
 * the reservation in a GEM object. Returns NULL when stolen memory is
 * unavailable, @size is zero, or the allocation fails.
 */
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int err;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	err = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
				 4096, DRM_MM_SEARCH_DEFAULT);
	if (err == 0) {
		obj = _i915_gem_object_create_stolen(dev, stolen);
		if (obj != NULL)
			return obj;

		/* Object creation failed: give the reservation back. */
		drm_mm_remove_node(stolen);
	}

	kfree(stolen);
	return NULL;
}

367 368 369 370 371 372 373
/*
 * i915_gem_object_create_stolen_for_preallocated - wrap a fixed stolen range
 * @dev: drm device
 * @stolen_offset: byte offset of the preallocated range within stolen memory
 * @gtt_offset: fixed GTT offset to bind at, or I915_GTT_OFFSET_NONE for a
 *              physical-only object
 * @size: size of the range in bytes
 *
 * Used for ranges the BIOS/firmware already placed (e.g. the boot
 * framebuffer): reserves the exact stolen range, wraps it in a GEM object,
 * and, unless @gtt_offset is I915_GTT_OFFSET_NONE, reserves the matching
 * fixed GTT range for it. Returns the object or NULL on failure.
 */
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt = &dev_priv->gtt.base;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
			stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	BUG_ON(stolen_offset & 4095);
	BUG_ON(size & 4095);

	if (WARN_ON(size == 0))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		drm_mm_remove_node(stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
	if (IS_ERR(vma))
		goto err_out;

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;
	if (drm_mm_initialized(&ggtt->mm)) {
		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
			goto err_vma;
		}
	}

	obj->has_global_gtt_mapping = 1;

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&vma->mm_list, &ggtt->inactive_list);
	i915_gem_object_pin_pages(obj);

	return obj;

err_vma:
	i915_gem_vma_destroy(vma);
err_out:
	/* The object owns the stolen node now (obj->stolen was set by
	 * _i915_gem_object_create_stolen()), so dropping the last reference
	 * removes and frees it via i915_gem_object_release_stolen().
	 * Removing/freeing it here as well (as the old code did) was a
	 * double drm_mm_remove_node()/kfree().
	 */
	drm_gem_object_unreference(&obj->base);
	return NULL;
}

456 457 458 459
void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	if (obj->stolen) {
460 461
		drm_mm_remove_node(obj->stolen);
		kfree(obj->stolen);
462 463 464
		obj->stolen = NULL;
	}
}