/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */

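/*
 * Remove the bo from every VM address space it is still mapped into.
 */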
void radeon_bo_clear_va(struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
		/* remove from all vm address space */
		radeon_vm_bo_rmv(bo->rdev, bo_va->vm, bo);
	}
}

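/*
 * Final TTM destroy callback: take the bo off the device's GEM list,
 * drop its surface register and VM mappings, release the GEM object
 * and free the structure.
 */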
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);
	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	radeon_bo_clear_va(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

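/*
 * Build the TTM placement list for a RADEON_GEM_DOMAIN_* mask; with no
 * domain bits set the bo falls back to cached system memory.
 */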
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	if (domain & RADEON_GEM_DOMAIN_CPU)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;
}

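/*
 * Allocate and initialize a radeon buffer object of the given size and
 * initial domain. If a VRAM allocation fails the domain is widened to
 * include GTT and the allocation is retried.
 */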
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long max_size = 0;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	/* maximum bo size is the minimum between visible vram and gtt size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if ((page_align << PAGE_SHIFT) >= max_size) {
		printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
			__func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
		return -ENOMEM;
	}

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

retry:
	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->gem_base.driver_private = NULL;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, 0, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			if (domain == RADEON_GEM_DOMAIN_VRAM) {
				domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			dev_err(rdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		}
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

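/*
 * Map the bo into the kernel address space via TTM, reusing an existing
 * kernel mapping if one is already set up.
 */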
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

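/*
 * Drop a reference on the bo and clear the caller's pointer once the
 * underlying TTM object is gone.
 */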
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	down_read(&rdev->pm.mclk_lock);
	ttm_bo_unref(&tbo);
	up_read(&rdev->pm.mclk_lock);
	if (tbo == NULL)
		*bo = NULL;
}

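/*
 * Pin the bo in the given domain, optionally restricted to addresses
 * below max_offset, by marking its placements NO_EVICT and revalidating.
 * Pinning an already pinned bo only increments the pin count.
 */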
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	if (max_offset) {
		u64 lpfn = max_offset >> PAGE_SHIFT;

		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

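/*
 * Drop one pin reference; when the count hits zero the NO_EVICT flag is
 * cleared and the bo is revalidated so TTM may move it again.
 */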
int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

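/*
 * Add a write-combining MTRR for the VRAM aperture and bring up the TTM
 * memory manager.
 */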
int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
			MTRR_TYPE_WRCOMB, 1);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}

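/*
 * Add a bo to a validation list: buffers with a write domain go to the
 * head of the list, read-only buffers to the tail.
 */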
void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
				struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->tv.head, head);
	} else {
		list_add_tail(&lobj->tv.head, head);
	}
}

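/*
 * Reserve all buffers on the list and validate each unpinned bo into its
 * preferred domain (write domain first, else read domain), falling back
 * from VRAM to GTT on failure, then record the resulting GPU offsets.
 */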
int radeon_bo_list_validate(struct list_head *head)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	u32 domain;
	int r;

	r = ttm_eu_reserve_buffers(head);
	if (unlikely(r != 0)) {
		return r;
	}
	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->bo;
		if (!bo->pin_count) {
			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			r = ttm_bo_validate(&bo->tbo, &bo->placement,
						true, false, false);
			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
					domain |= RADEON_GEM_DOMAIN_GTT;
					goto retry;
				}
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

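/*
 * Assign a hardware surface register matching the bo's tiling flags,
 * stealing one from an unpinned bo if all slots are taken. The bo must
 * be reserved by the caller.
 */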
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

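/*
 * Store new tiling flags and pitch on the bo; on evergreen and newer
 * chips (CHIP_CEDAR+) the bank width/height, macro tile aspect and tile
 * split fields are range-checked first.
 */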
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));
	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

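/*
 * Keep the surface register in sync with the bo's location: drop it when
 * forced or when the bo has left VRAM, (re)acquire it for a tiled surface
 * sitting in VRAM. The bo must be reserved by the caller.
 */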
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct radeon_bo *rbo;
	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

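/*
 * Fault-time hook: if the bo sits in VRAM beyond the CPU-visible
 * aperture, revalidate it into visible VRAM before the fault is served.
 */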
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.start << PAGE_SHIFT;
		if ((offset + size) > rdev->mc.visible_vram_size) {
			/* hurrah the memory is not visible ! */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > rdev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}

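/*
 * Wait for the bo to become idle, or just poll it when no_wait is set;
 * optionally reports the bo's current TTM memory type.
 */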
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}


/**
 * radeon_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_wait:		don't sleep while trying to reserve (return -EBUSY)
 *
 * Returns:
 * -EBUSY: buffer is busy and @no_wait is true
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}