/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it; see the caller pattern sketched below.
 */
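/* A minimal caller sketch (illustrative only), using the
 * radeon_bo_reserve()/radeon_bo_unreserve() helpers from radeon_object.h:
 *
 *	r = radeon_bo_reserve(bo, false);
 *	if (likely(r == 0)) {
 *		... access or modify the BO's placement, kmap, etc. ...
 *		radeon_bo_unreserve(bo);
 *	}
 */
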
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
	radeon_mn_unregister(bo);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
		    rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c)
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		rbo->placements[i].fpfn = 0;
		rbo->placements[i].lpfn = 0;
	}

	/*
	 * Use two-ended allocation depending on the buffer size to
	 * improve fragmentation quality.
	 * 512KB was measured to be the optimal threshold.
	 */
	if (rbo->tbo.mem.size > 512 * 1024) {
		for (i = 0; i < c; i++) {
			rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
		}
	}
}
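
/* A sketch (illustrative only) of how callers in this file use the helper
 * above: pick the allowed domains, then let TTM (re)validate the BO into
 * the new placement; error handling is elided:
 *
 *	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM |
 *					      RADEON_GEM_DOMAIN_GTT);
 *	r = ttm_bo_validate(&rbo->tbo, &rbo->placement, false, false);
 */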

int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
	                               RADEON_GEM_DOMAIN_GTT |
	                               RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}
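
/* Illustrative use only: a typical kernel-internal allocation of one page
 * of VRAM, page aligned and owned by the driver (kernel == true); error
 * handling is elided and the surrounding caller is hypothetical:
 *
 *	struct radeon_bo *bo;
 *
 *	r = radeon_bo_create(rdev, PAGE_SIZE, PAGE_SIZE, true,
 *			     RADEON_GEM_DOMAIN_VRAM, 0, NULL, &bo);
 */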

int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}
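
/* Illustrative kmap/kunmap pairing (a sketch; the BO is assumed to be
 * created, reserved and pinned elsewhere):
 *
 *	void *cpu_ptr;
 *
 *	r = radeon_bo_kmap(bo, &cpu_ptr);
 *	if (likely(r == 0)) {
 *		... CPU reads/writes through cpu_ptr go here ...
 *		radeon_bo_kunmap(bo);
 *	}
 */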

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned lpfn = 0;

		/* force to pin into visible video ram */
		if (bo->placements[i].flags & TTM_PL_FLAG_VRAM)
			lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; /* ??? */

		if (max_offset)
			lpfn = min(lpfn, (unsigned)(max_offset >> PAGE_SHIFT));

		bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}
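
/* Illustrative only: pin a BO into VRAM, remember its GPU address and
 * balance the pin with an unpin when done (reservation handling elided):
 *
 *	u64 gpu_addr;
 *
 *	r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 *	...
 *	r = radeon_bo_unpin(bo);
 */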

int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *         __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}
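
/* Worked example with illustrative numbers: for 2048 MB of real VRAM and
 * 512 MB currently used, half_vram = 1024 MB, half_free_vram = 512 MB and
 * the threshold is 256 MB; once usage reaches 1024 MB (half of VRAM) the
 * threshold bottoms out at the 1 MB floor enforced by the max() above.
 */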

int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_cs_reloc *lobj;
	struct radeon_bo *bo;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	r = ttm_eu_reserve_buffers(ticket, head);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((lobj->allowed_domains & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* hurrah the memory is not visible ! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}