/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it before touching a BO.
 */

static void radeon_bo_clear_va(struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
		/* remove from all vm address space */
		radeon_vm_bo_rmv(bo->rdev, bo_va);
	}
}

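/*
 * Bookkeeping helper: adjust the driver's GTT/VRAM usage counters by
 * the size of this BO; sign is +1 when memory is added to a domain
 * and -1 when it is removed.
 */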
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	radeon_bo_clear_va(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

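/*
 * Build the TTM placement list for the requested domains (VRAM, GTT,
 * CPU); if no domain flag is set, fall back to cached system memory.
 */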
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}
	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;
}

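/*
 * Allocate a new BO: the size is rounded up to whole pages, the GEM
 * object is initialized and the backing TTM object is created with a
 * placement derived from the requested domain.
 */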
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
	                               RADEON_GEM_DOMAIN_GTT |
	                               RADEON_GEM_DOMAIN_CPU);
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

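/*
 * Map the whole BO into kernel address space; the mapping is cached
 * in bo->kptr so repeated calls are cheap.
 */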
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	down_read(&rdev->pm.mclk_lock);
	ttm_bo_unref(&tbo);
	up_read(&rdev->pm.mclk_lock);
	if (tbo == NULL)
		*bo = NULL;
}

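/*
 * Pin a BO into the given domain, optionally below max_offset, and
 * return its GPU address.  Pinning is reference counted, so nested
 * pin/unpin calls balance out.
 */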
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	if (max_offset) {
		u64 lpfn = max_offset >> PAGE_SHIFT;

		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
	}
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (unlikely(r != 0))
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

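/*
 * Teardown helper: forcibly release any GEM objects userspace leaked;
 * if the object list is empty this is a no-op.
 */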
void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

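/*
 * Reserve every BO on the list and validate it into its requested
 * domain, retrying with the alternate domain if the first choice
 * cannot be satisfied.  All reservations are backed off on failure.
 */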
int radeon_bo_list_validate(struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_bo_list *lobj;
	struct radeon_bo *bo;
	u32 domain;
	int r;

	r = ttm_eu_reserve_buffers(ticket, head);
	if (unlikely(r != 0)) {
		return r;
	}
	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->bo;
		if (!bo->pin_count) {
			domain = lobj->domain;

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo);
			r = ttm_bo_validate(&bo->tbo, &bo->placement,
						true, false);
			if (unlikely(r)) {
				if (r != -ERESTARTSYS && domain != lobj->alt_domain) {
					domain = lobj->alt_domain;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

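/*
 * Assign one of the limited hardware surface registers to a tiled BO,
 * stealing the register of an unpinned owner if none are free, and
 * program it with this BO's tiling parameters.
 */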
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

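/*
 * Validate the requested tiling parameters (bank width/height, macro
 * tile aspect and tile split on CHIP_CEDAR and later) before storing
 * them on the BO.
 */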
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

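/*
 * Keep the surface register assignment in sync with the BO's current
 * placement: take one when a tiled BO lives in VRAM, drop it when the
 * BO moves out of VRAM or tiling is force-dropped.
 */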
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

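/*
 * TTM fault callback: if the faulting BO sits in VRAM beyond the
 * CPU-visible aperture, re-validate it into visible VRAM so the page
 * fault can be serviced.
 */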
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		size = bo->mem.num_pages << PAGE_SHIFT;
		offset = bo->mem.start << PAGE_SHIFT;
		if ((offset + size) > rdev->mc.visible_vram_size) {
			/* hurrah the memory is not visible ! */
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
			rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
			r = ttm_bo_validate(bo, &rbo->placement, false, false);
			if (unlikely(r != 0))
				return r;
			offset = bo->mem.start << PAGE_SHIFT;
			/* this should not happen */
			if ((offset + size) > rdev->mc.visible_vram_size)
				return -EINVAL;
		}
	}
	return 0;
}

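/*
 * Wait for outstanding GPU work on the BO to finish; optionally
 * report the memory type the BO currently resides in.
 */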
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}