/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

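/**
 * radeon_bo_clear_va - remove a BO from all VM address spaces
 * @bo:		bo structure
 *
 * Walks the BO's per-VM mapping list and removes every mapping
 * through radeon_vm_bo_rmv().
 */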
void radeon_bo_clear_va(struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
		/* remove from all vm address space */
		radeon_vm_bo_rmv(bo->rdev, bo_va);
	}
}

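/**
 * radeon_ttm_bo_destroy - TTM destroy callback for radeon BOs
 * @tbo:	TTM buffer object being destroyed
 *
 * Called by TTM when the last reference is dropped: takes the BO off the
 * device's GEM object list, releases its surface register and VM mappings,
 * releases the GEM object and frees the radeon_bo itself.
 */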
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);
	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	radeon_bo_clear_va(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

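/**
 * radeon_ttm_bo_is_radeon_bo - check whether a TTM BO belongs to this driver
 * @bo:		TTM buffer object
 *
 * Returns true if the destroy callback matches radeon_ttm_bo_destroy,
 * i.e. the object was created by radeon_bo_create().
 */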
bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

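/**
 * radeon_ttm_placement_from_domain - build TTM placements from a domain mask
 * @rbo:	radeon BO to fill in
 * @domain:	mask of RADEON_GEM_DOMAIN_VRAM/GTT/CPU
 *
 * Translates the requested domains into TTM placement flags and points the
 * busy (fallback) placement at GTT.
 */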
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}
	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;

	c = 0;
	rbo->placement.busy_placement = rbo->busy_placements;
	if (rbo->rdev->flags & RADEON_IS_AGP) {
		rbo->busy_placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
	} else {
		rbo->busy_placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
	}
	rbo->placement.num_busy_placement = c;
}

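/**
 * radeon_bo_create - allocate and initialize a radeon buffer object
 * @rdev:	radeon device
 * @size:	size in bytes, rounded up to page size
 * @byte_align:	requested byte alignment
 * @kernel:	true for in-kernel BOs (ttm_bo_type_kernel, non-interruptible init)
 * @domain:	initial placement domain
 * @sg:		optional sg table (ttm_bo_type_sg)
 * @bo_ptr:	returned radeon BO
 *
 * Allocates the radeon_bo, initializes the embedded GEM object and hands the
 * buffer to TTM via ttm_bo_init(); on success *bo_ptr points to the new BO.
 */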
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->gem_base.driver_private = NULL;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

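/**
 * radeon_bo_kmap - map a BO into kernel address space
 * @bo:		bo structure
 * @ptr:	optional location to store the kernel virtual address
 *
 * Maps the whole object with ttm_bo_kmap() and caches the pointer in
 * bo->kptr so that subsequent calls are cheap.
 */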
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

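/**
 * radeon_bo_kunmap - unmap a BO previously mapped with radeon_bo_kmap
 * @bo:		bo structure
 */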
void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

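/**
 * radeon_bo_unref - drop a reference to a radeon BO
 * @bo:		pointer to the BO pointer; set to NULL once the last
 *		reference is gone
 */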
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	down_read(&rdev->pm.mclk_lock);
	ttm_bo_unref(&tbo);
	up_read(&rdev->pm.mclk_lock);
	if (tbo == NULL)
		*bo = NULL;
}

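/**
 * radeon_bo_pin_restricted - pin a BO, optionally below a maximum offset
 * @bo:		bo structure
 * @domain:	domain to pin into
 * @max_offset:	highest allowed GPU offset within the domain, or 0 for no limit
 * @gpu_addr:	optional location to store the resulting GPU address
 *
 * Increments the pin count if the BO is already pinned, otherwise marks all
 * placements NO_EVICT and validates the BO into the requested domain.
 */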
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	if (max_offset) {
		u64 lpfn = max_offset >> PAGE_SHIFT;

		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

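/**
 * radeon_bo_unpin - decrement the pin count of a BO
 * @bo:		bo structure
 *
 * When the pin count reaches zero the NO_EVICT flag is cleared again so the
 * buffer can be moved by TTM.
 */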
int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

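/**
 * radeon_bo_evict_vram - evict all buffers out of VRAM
 * @rdev:	radeon device
 *
 * Asks TTM to evict everything from the VRAM memory domain.
 */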
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

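/**
 * radeon_bo_force_delete - force-free GEM objects still alive at teardown
 * @rdev:	radeon device
 *
 * Complains about leaked userspace objects and drops the remaining GEM
 * references so the underlying TTM buffers get destroyed.
 */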
void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

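/**
 * radeon_bo_init - one-time buffer object / memory manager initialization
 * @rdev:	radeon device
 *
 * Sets up a write-combining MTRR for the VRAM aperture, prints the detected
 * VRAM configuration and initializes TTM.
 */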
int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
			MTRR_TYPE_WRCOMB, 1);
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
}

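/**
 * radeon_bo_list_add_object - add a BO list entry to a validation list
 * @lobj:	list entry
 * @head:	validation list head
 *
 * Entries with a write domain go to the front of the list, read-only
 * entries to the tail.
 */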
void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
				struct list_head *head)
{
	if (lobj->wdomain) {
		list_add(&lobj->tv.head, head);
	} else {
		list_add_tail(&lobj->tv.head, head);
	}
}

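/**
 * radeon_bo_list_validate - reserve and validate a list of BOs
 * @head:	validation list head
 *
 * Reserves all buffers via ttm_eu_reserve_buffers(), validates any that are
 * not pinned and records their GPU offsets and tiling flags.
 */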
int radeon_bo_list_validate(struct list_head *head)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	int r;

	r = ttm_eu_reserve_buffers(head);
	if (unlikely(r != 0)) {
		return r;
	}
	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->bo;
		if (!bo->pin_count) {
			r = ttm_bo_validate(&bo->tbo, &bo->placement,
						true, false);
			if (unlikely(r)) {
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

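/**
 * radeon_bo_get_surface_reg - assign a surface register to a tiled BO
 * @bo:		bo structure (must be reserved)
 *
 * Finds a free surface register, stealing one from an unpinned BO if all are
 * in use, and programs it with the BO's tiling parameters.
 */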
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	BUG_ON(!radeon_bo_is_reserved(bo));

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

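/**
 * radeon_bo_set_tiling_flags - set tiling parameters for a BO
 * @bo:		bo structure
 * @tiling_flags:	packed tiling flags
 * @pitch:	surface pitch
 *
 * On CHIP_CEDAR and newer families the bank width/height, macro tile aspect
 * and tile split fields are range-checked before the flags are stored.
 */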
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	BUG_ON(!radeon_bo_is_reserved(bo));
	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

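/**
 * radeon_bo_check_tiling - update the surface register state of a BO
 * @bo:		bo structure
 * @has_moved:	true if the BO has just been moved
 * @force_drop:	true to unconditionally release the surface register
 *
 * Keeps the surface register allocation in sync with where the buffer
 * currently lives: a register is only held while a tiled BO is in VRAM.
 */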
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	BUG_ON(!radeon_bo_is_reserved(bo) && !force_drop);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct radeon_bo *rbo;
	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);
}

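/**
 * radeon_bo_fault_reserve_notify - TTM fault callback
 * @bo:		TTM buffer object about to be faulted in
 *
 * If the BO lies outside the CPU-visible part of VRAM it is revalidated into
 * the visible range before the CPU mapping is set up.
 */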
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.start << PAGE_SHIFT;
		if ((offset + size) > rdev->mc.visible_vram_size) {
			/* hurrah the memory is not visible ! */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > rdev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}

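/**
 * radeon_bo_wait - wait for a BO to become idle
 * @bo:		bo structure
 * @mem_type:	optional location to store the current memory type
 * @no_wait:	return -EBUSY instead of blocking if the BO is busy
 *
 * Reserves the BO, waits on its sync object under the fence lock and
 * unreserves it again.
 */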
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}


/**
 * radeon_bo_reserve - reserve bo
 * @bo:		bo structure
 * @no_intr:	don't return -ERESTARTSYS on pending signal
 *
 * Returns:
 * -ERESTARTSYS: A wait for the buffer to become unreserved was interrupted by
 * a signal. Release all buffer reservations and return to user-space.
 */
int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, !no_intr, false, false, 0);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
		return r;
	}
	return 0;
}