/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include "radeon_drm.h"
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */

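/*
 * A minimal caller-side sketch of that convention (illustrative only;
 * tiling_flags and pitch are locals of the caller):
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (likely(r == 0)) {
 *		radeon_bo_get_tiling_flags(bo, &tiling_flags, &pitch);
 *		radeon_bo_unreserve(bo);
 *	}
 */
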
void radeon_bo_clear_va(struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
		/* remove from all vm address space */
		mutex_lock(&bo_va->vm->mutex);
		list_del(&bo_va->vm_list);
		mutex_unlock(&bo_va->vm->mutex);
		list_del(&bo_va->bo_list);
		kfree(bo_va);
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);
	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	radeon_bo_clear_va(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

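/*
 * Translate a RADEON_GEM_DOMAIN_* mask into the TTM placement flags used
 * by ttm_bo_validate(); falls back to cached system memory when no domain
 * bit is set.
 */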
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	if (domain & RADEON_GEM_DOMAIN_CPU)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;
}

int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	unsigned long max_size = 0;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	/* maximum bo size is the minimum between visible vram and gtt size */
	max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
	if ((page_align << PAGE_SHIFT) >= max_size) {
		printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
			__func__, __LINE__, page_align  >> (20 - PAGE_SHIFT), max_size >> 20);
		return -ENOMEM;
	}

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

retry:
	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->gem_base.driver_private = NULL;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, 0, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS) {
			if (domain == RADEON_GEM_DOMAIN_VRAM) {
				domain |= RADEON_GEM_DOMAIN_GTT;
				goto retry;
			}
			dev_err(rdev->dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
		}
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}
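
/*
 * Illustrative allocation sketch (error handling trimmed), following the
 * usual kernel-internal pattern of create + reserve + pin; bo, size, r and
 * gpu_addr are the caller's locals:
 *
 *	struct radeon_bo *bo;
 *	u64 gpu_addr;
 *
 *	r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_VRAM, NULL, &bo);
 *	r = radeon_bo_reserve(bo, false);
 *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	radeon_bo_unreserve(bo);
 */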

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}
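
/*
 * Illustrative only: CPU access to an already reserved BO, assuming the
 * radeon_bo_size() helper from radeon_object.h:
 *
 *	void *ptr;
 *
 *	r = radeon_bo_kmap(bo, &ptr);
 *	if (r == 0) {
 *		memset(ptr, 0, radeon_bo_size(bo));
 *		radeon_bo_kunmap(bo);
 *	}
 */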

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	down_read(&rdev->pm.mclk_lock);
	ttm_bo_unref(&tbo);
	up_read(&rdev->pm.mclk_lock);
	if (tbo == NULL)
		*bo = NULL;
}

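/*
 * Pin the BO into the given domain (optionally below max_offset) and bump
 * its pin_count; an already pinned BO only gets the count incremented and
 * its current GPU offset reported back.
 */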
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	if (max_offset) {
		u64 lpfn = max_offset >> PAGE_SHIFT;

		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
			MTRR_TYPE_WRCOMB, 1);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}

void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
				struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->tv.head, head);
	} else {
		list_add_tail(&lobj->tv.head, head);
	}
}

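/*
 * Reserve every BO on the list, then validate each unpinned BO into its
 * preferred domain (write domain if set, else read domain), retrying in
 * GTT when VRAM validation fails.
 */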
int radeon_bo_list_validate(struct list_head *head)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	u32 domain;
	int r;

	r = ttm_eu_reserve_buffers(head);
	if (unlikely(r != 0)) {
		return r;
	}
	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->bo;
		if (!bo->pin_count) {
			domain = lobj->wdomain ? lobj->wdomain : lobj->rdomain;

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			r = ttm_bo_validate(&bo->tbo, &bo->placement,
						true, false, false);
			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain == RADEON_GEM_DOMAIN_VRAM) {
					domain |= RADEON_GEM_DOMAIN_GTT;
					goto retry;
				}
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

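/*
 * Find a free hardware surface register (stealing one from an unpinned BO
 * if needed) and program it for this BO's tiling layout; the BO must be
 * reserved by the caller.
 */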
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));
	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

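/*
 * Keep the surface register in sync with the BO's placement: drop it when
 * forced or when the BO has left VRAM, (re)acquire it when a tiled BO has
 * moved into VRAM.
 */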
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	BUG_ON(!atomic_read(&bo->tbo.reserved));

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct radeon_bo *rbo;
	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.start << PAGE_SHIFT;
		if ((offset + size) > rdev->mc.visible_vram_size) {
			/* hurrah the memory is not visible ! */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, true, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > rdev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}


/**
 * radeon_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_wait:		don't sleep while trying to reserve (return -EBUSY)
 *
 * Returns:
 * -EBUSY: buffer is busy and @no_wait is true
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}

/* object has to be reserved */
struct radeon_bo_va *radeon_bo_va(struct radeon_bo *rbo, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &rbo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}