/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */
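
/*
 * A minimal caller-side sketch of that convention (illustrative only,
 * assuming a valid struct radeon_bo *bo):
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	radeon_bo_get_tiling_flags(bo, &tiling_flags, &pitch);
 *	radeon_bo_unreserve(bo);
 *
 * Every accessor that reads or writes per-BO state is expected to run
 * between the reserve and unreserve calls.
 */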

void radeon_bo_clear_va(struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
		/* remove from all vm address space */
		mutex_lock(&bo_va->vm->mutex);
		list_del(&bo_va->vm_list);
		mutex_unlock(&bo_va->vm->mutex);
		list_del(&bo_va->bo_list);
		kfree(bo_va);
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);
	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	radeon_bo_clear_va(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	if (domain & RADEON_GEM_DOMAIN_CPU)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;
}
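
/*
 * A minimal sketch of how callers typically combine this with
 * ttm_bo_validate() (illustrative only, assuming an already reserved bo):
 *
 *	radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM |
 *					     RADEON_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false, false);
 *
 * VRAM is listed first, so TTM prefers it and falls back to GTT; an
 * empty domain degrades to cached system memory.
 */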

int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long max_size = 0;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	/* maximum bo size is the minimum between visible vram and gtt size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if ((page_align << PAGE_SHIFT) >= max_size) {
		printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
			__func__, __LINE__, page_align  >> (20 - PAGE_SHIFT), max_size >> 20);
		return -ENOMEM;
	}

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

retry:
	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->gem_base.driver_private = NULL;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, 0, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			if (domain == RADEON_GEM_DOMAIN_VRAM) {
				domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			dev_err(rdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		}
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}
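
/*
 * A minimal usage sketch (illustrative only; error handling trimmed and
 * rdev assumed to be a valid struct radeon_device):
 *
 *	struct radeon_bo *bo;
 *	void *cpu_ptr;
 *	int r;
 *
 *	r = radeon_bo_create(rdev, 4096, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_GTT, NULL, &bo);
 *	if (r)
 *		return r;
 *	r = radeon_bo_reserve(bo, false);
 *	if (r == 0) {
 *		r = radeon_bo_kmap(bo, &cpu_ptr);
 *		radeon_bo_unreserve(bo);
 *	}
 *	...
 *	radeon_bo_unref(&bo);
 */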

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	down_read(&rdev->pm.mclk_lock);
	ttm_bo_unref(&tbo);
	up_read(&rdev->pm.mclk_lock);
	if (tbo == NULL)
		*bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	if (max_offset) {
		u64 lpfn = max_offset >> PAGE_SHIFT;

		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}
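
/*
 * A pinning sketch (illustrative; gpu_addr is a u64 declared by the
 * caller): scanout buffers are typically pinned into VRAM and their GPU
 * address retrieved in one go:
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (unlikely(r != 0))
 *		return r;
 *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	radeon_bo_unreserve(bo);
 *
 * The matching radeon_bo_unpin() call is likewise made under
 * reservation once the hardware no longer uses the buffer.
 */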

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
			MTRR_TYPE_WRCOMB, 1);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}

void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
				struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->tv.head, head);
	} else {
		list_add_tail(&lobj->tv.head, head);
	}
}

int radeon_bo_list_validate(struct list_head *head)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	u32 domain;
	int r;

	r = ttm_eu_reserve_buffers(head);
	if (unlikely(r != 0)) {
		return r;
	}
	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->bo;
		if (!bo->pin_count) {
			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			r = ttm_bo_validate(&bo->tbo, &bo->placement,
						true, false, false);
			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
					domain |= RADEON_GEM_DOMAIN_GTT;
					goto retry;
				}
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}
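
/*
 * A rough sketch of how a caller builds and validates such a list
 * (illustrative only; the lobj entries and their domains come from the
 * submitting code and the names below are made up):
 *
 *	struct list_head head;
 *
 *	INIT_LIST_HEAD(&head);
 *	radeon_bo_list_add_object(&lobj_a, &head);
 *	radeon_bo_list_add_object(&lobj_b, &head);
 *	r = radeon_bo_list_validate(&head);
 *
 * Entries with a write domain are added at the head of the list and
 * read-only entries at the tail; on success each lobj->gpu_offset has
 * been filled in.
 */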

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}
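
/*
 * An illustrative sketch of how a caller might pack the evergreen
 * tiling fields validated above (field values here are made up):
 *
 *	u32 flags = RADEON_TILING_MACRO;
 *
 *	flags |= (bankw & RADEON_TILING_EG_BANKW_MASK)
 *			<< RADEON_TILING_EG_BANKW_SHIFT;
 *	flags |= (bankh & RADEON_TILING_EG_BANKH_MASK)
 *			<< RADEON_TILING_EG_BANKH_SHIFT;
 *	r = radeon_bo_set_tiling_flags(bo, flags, pitch);
 */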

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));
	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct radeon_bo *rbo;
	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.start << PAGE_SHIFT;
		if ((offset + size) > rdev->mc.visible_vram_size) {
			/* hurrah the memory is not visible ! */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > rdev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}
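
/*
 * Illustrative sketch: waiting for the GPU to be done with a buffer
 * before the CPU touches its contents:
 *
 *	r = radeon_bo_wait(bo, NULL, false);
 *	if (r)
 *		return r;
 */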


/**
 * radeon_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_wait:		don't sleep while trying to reserve (return -EBUSY)
 *
 * Returns:
 * -EBUSY: buffer is busy and @no_wait is true
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

/* object has to be reserved */
struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &rbo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}