/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS, so the user finds that their system has less memory
 * available than they installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon, which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

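/*
 * Allocate a node from the stolen-memory range manager, constrained to
 * [start, end). The stolen_lock serialises allocator access; -ENODEV
 * indicates that no stolen area was set up (see i915_gem_init_stolen()).
 */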
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0, U64_MAX);
}

void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}

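/*
 * Validate the stolen region reported by firmware: trim it if the GTT
 * itself lives inside stolen on old (gen <= 4) parts, and reserve the
 * physical range so nothing else claims it. Returns the DMA base of the
 * usable stolen region, or 0 on failure.
 */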
static dma_addr_t i915_stolen_to_dma(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	dma_addr_t base = intel_graphics_stolen_res.start;
	struct resource *r;

	GEM_BUG_ON(overflows_type(intel_graphics_stolen_res.start, base));

	if (base == 0 || add_overflows(base, ggtt->stolen_size))
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_GEN(dev_priv) <= 4 &&
	    !IS_G33(dev_priv) && !IS_PINEVIEW(dev_priv) && !IS_G4X(dev_priv)) {
		struct {
			dma_addr_t start, end;
		} stolen[2] = {
			{ .start = base, .end = base + ggtt->stolen_size, },
			{ .start = base, .end = base + ggtt->stolen_size, },
		};
		u64 ggtt_start, ggtt_end;

		ggtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev_priv))
			ggtt_start = (ggtt_start & PGTBL_ADDRESS_LO_MASK) |
				     (ggtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			ggtt_start &= PGTBL_ADDRESS_LO_MASK;
		ggtt_end = ggtt_start + ggtt_total_entries(ggtt) * 4;

		if (ggtt_start >= stolen[0].start && ggtt_start < stolen[0].end)
			stolen[0].end = ggtt_start;
		if (ggtt_end > stolen[1].start && ggtt_end <= stolen[1].end)
			stolen[1].start = ggtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			ggtt->stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			ggtt->stolen_size = stolen[1].end - stolen[1].start;
		}

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			dma_addr_t end = base + ggtt->stolen_size - 1;

			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long)ggtt_start,
				      (unsigned long long)ggtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to %pad - %pad\n",
				      &base, &end);
		}
	}

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev_priv->drm.dev, base, ggtt->stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 * There's also BIOS with off-by-one on the other end.
		 */
		r = devm_request_mem_region(dev_priv->drm.dev, base + 1,
					    ggtt->stolen_size - 2,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev_priv)) {
			dma_addr_t end = base + ggtt->stolen_size;

			DRM_ERROR("conflict detected with stolen region: [%pad - %pad]\n",
				  &base, &end);
			base = 0;
		}
	}

	return base;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}

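/*
 * The *_get_stolen_reserved() helpers below decode the per-platform
 * "stolen reserved" register into a base and size. A cleared enable bit
 * (or, on platforms without a size field, a base of zero) means no
 * portion of stolen is reserved.
 */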
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
				     CTG_STOLEN_RESERVED :
				     ELK_STOLEN_RESERVED);
	dma_addr_t stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0) {
		*base = 0;
		*size = 0;
		return;
	}

	/*
	 * Whether ILK really reuses the ELK register for this is unclear.
	 * Let's see if we catch anyone with this supposedly enabled on ILK.
	 */
	WARN(IS_GEN5(dev_priv), "ILK stolen reserved found? 0x%08x\n", reg_val);

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     dma_addr_t *base, u32 *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
		*base = 0;
		*size = 0;
		return;
	}

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     dma_addr_t *base, u32 *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
		*base = 0;
		*size = 0;
		return;
	}

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void chv_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
		*base = 0;
		*size = 0;
		return;
	}

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    dma_addr_t *base, u32 *size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	dma_addr_t stolen_top;

	if ((reg_val & GEN6_STOLEN_RESERVED_ENABLE) == 0) {
		*base = 0;
		*size = 0;
		return;
	}

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

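/*
 * Set up the stolen-memory allocator: bail out under a vGPU or active
 * DMAR (gen < 8), locate the stolen base, subtract the per-platform
 * reserved portion at the top, and initialise a drm_mm over the rest.
 */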
int i915_gem_init_stolen(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	dma_addr_t reserved_base, stolen_top;
	u32 reserved_total, reserved_size;
	u32 stolen_usable_start;

	mutex_init(&dev_priv->mm.stolen_lock);

	if (intel_vgpu_active(dev_priv)) {
		DRM_INFO("iGVT-g active, disabling use of stolen memory\n");
		return 0;
	}

	if (intel_vtd_active() && INTEL_GEN(dev_priv) < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}

	if (ggtt->stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_dma(dev_priv);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	stolen_top = dev_priv->mm.stolen_base + ggtt->stolen_size;
	reserved_base = 0;
	reserved_size = 0;

	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
	case 3:
		break;
	case 4:
		if (!IS_G4X(dev_priv))
			break;
		/* fall through */
	case 5:
		g4x_get_stolen_reserved(dev_priv,
					&reserved_base, &reserved_size);
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	case 7:
		gen7_get_stolen_reserved(dev_priv,
					 &reserved_base, &reserved_size);
		break;
	default:
		if (IS_LP(dev_priv))
			chv_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		else
			bdw_get_stolen_reserved(dev_priv,
						&reserved_base, &reserved_size);
		break;
	}

	/* It is possible for the reserved base to be zero, but the register
	 * field for size doesn't have a zero option. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	if (reserved_base < dev_priv->mm.stolen_base ||
	    reserved_base + reserved_size > stolen_top) {
		dma_addr_t reserved_top = reserved_base + reserved_size;
		DRM_ERROR("Stolen reserved area [%pad - %pad] outside stolen memory [%pad - %pad]\n",
			  &reserved_base, &reserved_top,
			  &dev_priv->mm.stolen_base, &stolen_top);
		return 0;
	}

	ggtt->stolen_reserved_base = reserved_base;
	ggtt->stolen_reserved_size = reserved_size;

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_KMS("Memory reserved for graphics device: %uK, usable: %uK\n",
		      ggtt->stolen_size >> 10,
		      (ggtt->stolen_size - reserved_total) >> 10);

	stolen_usable_start = 0;
	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8)
		stolen_usable_start = 4096;

	ggtt->stolen_usable_size =
		ggtt->stolen_size - reserved_total - stolen_usable_start;

	/* Basic memrange allocator for stolen space. */
	drm_mm_init(&dev_priv->mm.stolen, stolen_usable_start,
		    ggtt->stolen_usable_size);

	return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct sg_table *st;
	struct scatterlist *sg;

	GEM_BUG_ON(range_overflows(offset, size, dev_priv->ggtt.stolen_size));

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return ERR_PTR(-ENOMEM);
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}

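/* get_pages hook: attach the fake sg_table covering the stolen block. */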
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages =
		i915_pages_create_for_stolen(obj->base.dev,
					     obj->stolen->start,
					     obj->stolen->size);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	__i915_gem_object_set_pages(obj, pages, obj->stolen->size);

	return 0;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj,
					     struct sg_table *pages)
{
	/* Should only be called from i915_gem_object_release_stolen() */
	sg_free_table(pages);
	kfree(pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct drm_mm_node *stolen = fetch_and_zero(&obj->stolen);

	GEM_BUG_ON(!stolen);

	__i915_gem_object_unpin_pages(obj);

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
}

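/* GEM backing-store hooks for objects carved out of stolen memory. */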
static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

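/*
 * Wrap a pre-reserved stolen-memory node in a GEM object. The pages are
 * pinned immediately and stay pinned until release, as stolen memory has
 * no backing store to swap to.
 */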
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->stolen = stolen;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (i915_gem_object_pin_pages(obj))
		goto cleanup;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

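/*
 * Allocate a GEM object from anywhere in stolen memory, 4096-byte
 * aligned. Returns NULL if stolen is unavailable or space is exhausted.
 */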
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_i915_private *dev_priv, u32 size)
{
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}

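/*
 * Create a GEM object for a region already in use at a fixed stolen
 * offset (such as the firmware-allocated framebuffer mentioned above),
 * optionally binding it at a fixed GGTT offset as well. The caller must
 * hold struct_mutex.
 */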
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
			stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) ||
	    WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
	    WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev_priv, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	ret = i915_gem_object_pin_pages(obj);
	if (ret)
		goto err;

	vma = i915_vma_instance(obj, &ggtt->base, NULL);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_pages;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	ret = i915_gem_gtt_reserve(&ggtt->base, &vma->node,
				   size, gtt_offset, obj->cache_level,
				   0);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
		goto err_pages;
	}

	GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));

	vma->pages = obj->mm.pages;
	vma->flags |= I915_VMA_GLOBAL_BIND;
	__i915_vma_set_map_and_fenceable(vma);
	list_move_tail(&vma->vm_link, &ggtt->base.inactive_list);

	spin_lock(&dev_priv->mm.obj_lock);
	list_move_tail(&obj->mm.link, &dev_priv->mm.bound_list);
	obj->bind_count++;
	spin_unlock(&dev_priv->mm.obj_lock);

	return obj;

err_pages:
	i915_gem_object_unpin_pages(obj);
err:
	i915_gem_object_put(obj);
	return NULL;
}