/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */
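/*
 * For illustration only, a minimal sketch of the caller-side pattern
 * (assuming the radeon_bo_reserve()/radeon_bo_unreserve() helpers
 * from radeon_object.h):
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	... access or validate the BO ...
 *	radeon_bo_unreserve(bo);
 */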

void radeon_bo_clear_va(struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
		/* remove from all vm address space */
		radeon_vm_bo_rmv(bo->rdev, bo_va);
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);
	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	radeon_bo_clear_va(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

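/*
 * Build the TTM placement list for @rbo from a RADEON_GEM_DOMAIN_*
 * mask: VRAM is write-combined/uncached, GTT and system placements
 * are write-combined on AGP and cached otherwise, and an empty mask
 * falls back to system memory with any caching attribute.
 */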
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}
	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;
}

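/*
 * Create a new radeon BO.  The size is page-aligned, the underlying
 * GEM object is initialized, and the buffer is handed to TTM for
 * placement according to @domain.  On success *bo_ptr points to the
 * new BO, which is freed by radeon_ttm_bo_destroy() once the last
 * reference goes away.
 */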
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->gem_base.driver_private = NULL;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	down_read(&rdev->pm.mclk_lock);
	ttm_bo_unref(&tbo);
	up_read(&rdev->pm.mclk_lock);
	if (tbo == NULL)
		*bo = NULL;
}

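/*
 * Pin a BO in @domain, optionally restricted to GPU offsets below
 * @max_offset.  Pinning is reference counted: only the first pin
 * validates the buffer with TTM_PL_FLAG_NO_EVICT set, and
 * radeon_bo_unpin() clears the flag again when the count drops to
 * zero.
 */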
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	if (max_offset) {
		u64 lpfn = max_offset >> PAGE_SHIFT;

		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
				struct list_head *head)
{
	if (lobj->written) {
		list_add(&lobj->tv.head, head);
	} else {
		list_add_tail(&lobj->tv.head, head);
	}
}

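/*
 * Reserve all BOs on @head and validate every unpinned one into its
 * preferred domain, retrying once with the alternate domain if the
 * preferred placement cannot be satisfied.  UVD buffers are also
 * forced into the segment addressable by the UVD block.
 */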
int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	u32 domain;
	int r;

	r = ttm_eu_reserve_buffers(ticket, head);
	if (unlikely(r != 0)) {
		return r;
	}
	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->bo;
		if (!bo->pin_count) {
			domain = lobj->domain;

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo);
			r = ttm_bo_validate(&bo->tbo, &bo->placement,
						true, false);
			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
					domain = lobj->alt_domain;
					goto retry;
				}
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

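/*
 * Assign one of the RADEON_GEM_MAX_SURFACES tiling surface registers
 * to @bo.  A free register is used if available; otherwise one owned
 * by an unpinned BO is stolen and that BO's CPU mapping is torn down.
 * Must be called with the BO reserved.
 */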
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

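/*
 * Keep @bo's surface register in sync with its placement: drop it on
 * request or when the BO has left VRAM, and (re)acquire it when a
 * tiled surface has moved back into VRAM.
 */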
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

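/*
 * Called when a CPU page fault needs the BO reserved: if the BO sits
 * beyond the CPU-visible part of VRAM, revalidate it into the visible
 * aperture before the fault is resolved.
 */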
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.start << PAGE_SHIFT;
		if ((offset + size) > rdev->mc.visible_vram_size) {
			/* hurrah, the memory is not visible! */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > rdev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}

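/*
 * Wait for @bo to become idle, optionally returning its current
 * memory type.  With @no_wait set, -EBUSY is returned instead of
 * blocking on the reservation or the fence.
 */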
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}