/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions are calling it.
 */

static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
	radeon_mn_unregister(bo);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
		    rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c)
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		rbo->placements[i].fpfn = 0;
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM))
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}

	/*
	 * Use two-ended allocation depending on the buffer size to
	 * improve fragmentation quality.
	 * 512KB was measured as the optimal number.
	 */
	if (!((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
	      (rbo->placements[i].flags & TTM_PL_FLAG_VRAM)) &&
	    rbo->tbo.mem.size > 512 * 1024) {
		for (i = 0; i < c; i++) {
			rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
		}
	}
}
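
/*
 * Illustrative sketch (not part of the driver logic): for a BO created with
 * domain = RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT, no special GTT
 * caching flags and a non-AGP board, the function above would build:
 *
 *   placements[0].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
 *                         TTM_PL_FLAG_VRAM;
 *   placements[1].flags = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
 *   num_placement = num_busy_placement = 2;
 *
 * The VRAM entry is additionally limited to the CPU-visible aperture
 * (via lpfn) only when RADEON_GEM_CPU_ACCESS is set.
 */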

int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct reservation_object *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
	                               RADEON_GEM_DOMAIN_GTT |
	                               RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}
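
/*
 * Illustrative sketch (assumed typical in-kernel usage, not a definitive
 * recipe): a kernel-owned BO is usually created, reserved, pinned and
 * mapped roughly like this:
 *
 *   struct radeon_bo *bo;
 *   u64 gpu_addr;
 *   void *cpu_ptr;
 *   int r;
 *
 *   r = radeon_bo_create(rdev, size, PAGE_SIZE, true,
 *                        RADEON_GEM_DOMAIN_VRAM, 0, NULL, NULL, &bo);
 *   if (r)
 *           return r;
 *   r = radeon_bo_reserve(bo, false);
 *   if (r == 0) {
 *           r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *           if (r == 0)
 *                   r = radeon_bo_kmap(bo, &cpu_ptr);
 *           radeon_bo_unreserve(bo);
 *   }
 *
 * Teardown mirrors this: radeon_bo_kunmap(), radeon_bo_unpin() (again under
 * a reserve) and finally radeon_bo_unref().
 */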

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}
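
/*
 * Note (summarizing the code above, not new behaviour): pinning is reference
 * counted.  A second radeon_bo_pin() on an already pinned BO only bumps
 * pin_count and reports the existing GPU offset; the placement is not
 * re-validated.  Pin and unpin both end up in ttm_bo_validate(), which
 * expects the BO to already be reserved by the caller, e.g.:
 *
 *   r = radeon_bo_reserve(bo, false);
 *   if (r == 0) {
 *           r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
 *           radeon_bo_unreserve(bo);
 *   }
 */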

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *         __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}
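
/*
 * Worked example of the formula above (illustrative numbers only): with
 * 2048 MB of real VRAM and 512 MB currently used, half_vram is 1024 MB,
 * half_free_vram is 512 MB and the resulting threshold is 256 MB per IB.
 * Once usage reaches or exceeds 1024 MB (half of VRAM), half_free_vram
 * drops to 0 and the threshold clamps to the 1 MB floor.
 */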

int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_cs_reloc *lobj;
	struct radeon_bo *bo;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	r = ttm_eu_reserve_buffers(ticket, head, true);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}
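
/*
 * Illustrative sketch (hypothetical caller, not taken from the driver):
 * tiling parameters are packed into tiling_flags with the same shift/mask
 * pairs that the validation above uses for decoding, e.g. for a bank width
 * of 2 on an Evergreen-class chip:
 *
 *   u32 tiling_flags = RADEON_TILING_MACRO;
 *
 *   tiling_flags |= (2 & RADEON_TILING_EG_BANKW_MASK) <<
 *                   RADEON_TILING_EG_BANKW_SHIFT;
 *   r = radeon_bo_set_tiling_flags(bo, tiling_flags, pitch);
 */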

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* hurrah the memory is not visible ! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;

	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}