/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_trace.h"

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */
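/*
 * For example, a typical access pattern looks like this (a sketch only;
 * radeon_bo_reserve() is defined below and radeon_bo_unreserve() is its
 * inline counterpart, with "flags" and "pitch" being the caller's locals):
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (r)
 *		return r;
 *	radeon_bo_get_tiling_flags(bo, &flags, &pitch);
 *	radeon_bo_unreserve(bo);
 */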

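/*
 * Unlink a BO from every VM address space it is mapped into: each per-VM
 * mapping is removed under the owning VM's mutex and freed. Called from
 * the TTM destroy path below.
 */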
void radeon_bo_clear_va(struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
		/* remove from all vm address space */
		mutex_lock(&bo_va->vm->mutex);
		list_del(&bo_va->vm_list);
		mutex_unlock(&bo_va->vm->mutex);
		list_del(&bo_va->bo_list);
		kfree(bo_va);
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);
	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	radeon_bo_clear_va(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

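/*
 * Returns true only for TTM BOs created by this driver, identified by
 * their destroy callback, so callers can safely container_of() them back
 * to a struct radeon_bo.
 */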
bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

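/*
 * Translate a RADEON_GEM_DOMAIN_* mask into the TTM placement list the
 * validation code consumes; with no domain bit set it falls back to
 * cacheable system memory.
 */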
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	if (domain & RADEON_GEM_DOMAIN_CPU)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;
}

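/*
 * Allocate a GEM-backed TTM buffer object. Allocations larger than the
 * smaller of visible VRAM and GTT are rejected outright; a failed VRAM
 * allocation (other than -ERESTARTSYS) is retried with GTT added to the
 * allowed domains.
 */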
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long max_size = 0;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	/* maximum bo size is the minimum of visible vram and gtt size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if ((page_align << PAGE_SHIFT) >= max_size) {
		printk(KERN_WARNING "%s:%d alloc size %ldMb bigger than %ldMb limit\n",
			__func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
		return -ENOMEM;
	}

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

retry:
	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->gem_base.driver_private = NULL;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	mutex_lock(&rdev->vram_mutex);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, 0, !kernel, NULL,
			acc_size, &radeon_ttm_bo_destroy);
	mutex_unlock(&rdev->vram_mutex);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			if (domain == RADEON_GEM_DOMAIN_VRAM) {
				domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			dev_err(rdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		}
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

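/*
 * Map the whole BO into the kernel address space; the mapping is cached
 * in bo->kptr so repeated calls are cheap until radeon_bo_kunmap().
 */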
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

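/* Tear down the kernel mapping created by radeon_bo_kmap(), if any. */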
void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

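/*
 * Drop one reference; the radeon_bo itself is freed through
 * radeon_ttm_bo_destroy() when the last TTM reference goes away, in
 * which case the caller's pointer is also cleared.
 */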
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	mutex_lock(&rdev->vram_mutex);
	ttm_bo_unref(&tbo);
	mutex_unlock(&rdev->vram_mutex);
	if (tbo == NULL)
		*bo = NULL;
}

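/*
 * Pin the BO into @domain, optionally constrained below @max_offset.
 * Nested pins only bump the pin count (and must not add a new offset
 * restriction); the first pin marks every placement NO_EVICT and
 * revalidates the BO.
 */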
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		WARN_ON_ONCE(max_offset != 0);
		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	if (max_offset) {
		u64 lpfn = max_offset >> PAGE_SHIFT;

		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}

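/* Convenience wrapper: pin with no upper bound on the placement offset. */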
int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}
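
/*
 * Drop one pin reference; only when the count reaches zero are the
 * NO_EVICT flags cleared and the BO revalidated as evictable again.
 */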
int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

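/*
 * Evict everything currently resident in VRAM, e.g. around suspend; the
 * IGP shortcut below is compiled out (the "0 &&") until proper pm ops
 * can handle it.
 */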
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

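/*
 * Teardown-time safety net: any GEM objects userspace leaked are
 * reported and forcibly unreferenced here.
 */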
void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

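/*
 * Cover the VRAM aperture with a write-combining MTRR and report the
 * detected memory configuration before initializing the TTM backend.
 */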
int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
			MTRR_TYPE_WRCOMB, 1);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}

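/*
 * Write-domain BOs go to the head of the validation list so they get
 * reserved first; read-only BOs are appended at the tail.
 */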
void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
				struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->tv.head, head);
	} else {
		list_add_tail(&lobj->tv.head, head);
	}
}

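/*
 * Reserve every BO on the list, validate each unpinned one into its
 * preferred domain (write domain wins over read domain, VRAM falls back
 * to VRAM|GTT), and record the resulting GPU offset and tiling flags.
 */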
int radeon_bo_list_validate(struct list_head *head)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	u32 domain;
	int r;

	r = ttm_eu_reserve_buffers(head);
	if (unlikely(r != 0)) {
		return r;
	}
	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->bo;
		if (!bo->pin_count) {
			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			r = ttm_bo_validate(&bo->tbo, &bo->placement,
						true, false, false);
			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
					domain |= RADEON_GEM_DOMAIN_GTT;
					goto retry;
				}
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			 struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

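/*
 * Find a hardware surface register for a tiled BO, stealing one from an
 * unpinned BO when all registers are taken; the BO must be reserved.
 */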
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

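/*
 * Store new tiling flags, first validating the Evergreen-specific bank
 * width/height, macro-tile aspect and tile-split fields; the BO is
 * reserved and unreserved internally.
 */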
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));
	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

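/*
 * Keep the surface register in sync with the BO's tiling state and
 * placement: drop it on @force_drop or when the BO has left VRAM,
 * (re)acquire it when a moved tiled BO sits in VRAM.
 */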
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

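/*
 * TTM move callback: drop the surface register (tiling is re-applied on
 * next use) and invalidate any VM mappings of the moved BO.
 */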
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

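/*
 * TTM fault callback: if the faulting BO lives in VRAM beyond the
 * CPU-visible aperture, migrate it into visible VRAM before the page
 * fault is served.
 */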
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.start << PAGE_SHIFT;
		if ((offset + size) > rdev->mc.visible_vram_size) {
			/* hurrah the memory is not visible ! */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > rdev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}
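
/*
 * Reserve the BO and wait for any fence attached to it (or just poll
 * when @no_wait); optionally reports the BO's current memory type.
 */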
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}


/**
 * radeon_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_wait:		don't sleep while trying to reserve (return -EBUSY)
 *
 * Returns:
 * -EBUSY: buffer is busy and @no_wait is true
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

/* object has to be reserved */
struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &rbo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}