/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it first.
 */

static void radeon_bo_clear_va(struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va, *tmp;

	list_for_each_entry_safe(bo_va, tmp, &bo->va, bo_list) {
		/* remove from all vm address spaces */
		radeon_vm_bo_rmv(bo->rdev, bo_va);
	}
}

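/* Adjust the device-wide GTT/VRAM usage counters by the size of the BO. */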
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	radeon_bo_clear_va(bo);
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

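/**
 * radeon_ttm_placement_from_domain - build the TTM placement for a BO
 * @rbo: buffer object to set up
 * @domain: mask of requested RADEON_GEM_DOMAIN_* flags
 *
 * Translates the requested GEM domains (VRAM, GTT, CPU) into TTM placement
 * flags, taking the BO's caching flags into account, and falls back to
 * system memory when no domain bit is set. Buffers larger than 512KB are
 * additionally marked for top-down allocation to reduce fragmentation.
 */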
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.fpfn = 0;
	rbo->placement.lpfn = 0;
	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
					TTM_PL_FLAG_VRAM;
	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_TT;
		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_TT;
		}
	}
	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++] = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_SYSTEM;
		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
		    rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c)
		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	/*
	 * Use two-ended allocation depending on the buffer size to
	 * improve fragmentation quality.
	 * 512KB was measured as the optimal number.
	 */
	if (rbo->tbo.mem.size > 512 * 1024) {
		for (i = 0; i < c; i++) {
			rbo->placements[i] |= TTM_PL_FLAG_TOPDOWN;
		}
	}
}

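/**
 * radeon_bo_create - allocate and initialize a radeon buffer object
 * @rdev: radeon device the BO belongs to
 * @size: requested size in bytes, rounded up to a full page
 * @byte_align: byte alignment, converted to a page alignment for TTM
 * @kernel: true for kernel-internal BOs (uninterruptible allocation)
 * @domain: initial RADEON_GEM_DOMAIN_* placement
 * @flags: RADEON_GEM_GTT_* caching flags
 * @sg: optional scatter/gather table for imported buffers
 * @bo_ptr: where to store the newly created BO
 *
 * Returns 0 on success, a negative error code otherwise.
 */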
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
	                               RADEON_GEM_DOMAIN_GTT |
	                               RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

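/**
 * radeon_bo_kmap - map a buffer object into kernel address space
 * @bo: buffer object to map (must be reserved by the caller)
 * @ptr: optional location to store the kernel virtual address
 *
 * Reuses an existing mapping if one is already set up, otherwise creates
 * one with ttm_bo_kmap(). Returns 0 on success or a negative error code.
 */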
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

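/**
 * radeon_bo_unref - drop a reference to a buffer object
 * @bo: pointer to the BO pointer, cleared once the object is released
 *
 * Drops the underlying TTM reference under the pm.mclk_lock read lock;
 * the final reference triggers radeon_ttm_bo_destroy().
 */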
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	down_read(&rdev->pm.mclk_lock);
	ttm_bo_unref(&tbo);
	up_read(&rdev->pm.mclk_lock);
	if (tbo == NULL)
		*bo = NULL;
}

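/**
 * radeon_bo_pin_restricted - pin a buffer object into a domain
 * @bo: buffer object to pin (must be reserved by the caller)
 * @domain: RADEON_GEM_DOMAIN_* to pin into
 * @max_offset: highest acceptable GPU offset, or 0 for no restriction
 * @gpu_addr: optional location to return the GPU address of the pinned BO
 *
 * If the BO is already pinned the pin count is simply bumped; otherwise the
 * placement is restricted (to visible VRAM and/or @max_offset) and the BO is
 * validated with TTM_PL_FLAG_NO_EVICT set. Returns 0 on success or a
 * negative error code.
 */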
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	if (domain == RADEON_GEM_DOMAIN_VRAM) {
		/* force to pin into visible video ram */
		bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
	}
	if (max_offset) {
		u64 lpfn = max_offset >> PAGE_SHIFT;

		if (!bo->placement.lpfn)
			bo->placement.lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT;

		if (lpfn < bo->placement.lpfn)
			bo->placement.lpfn = lpfn;
	}
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

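/**
 * radeon_bo_unpin - decrease the pin count of a buffer object
 * @bo: buffer object to unpin (must be reserved by the caller)
 *
 * When the pin count drops to zero the NO_EVICT flag is cleared from every
 * placement and the BO is re-validated so TTM may evict it again. Returns 0
 * on success or a negative error code.
 */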
int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix for IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *         __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

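/**
 * radeon_bo_list_validate - reserve and place all BOs of a command submission
 * @rdev: radeon device
 * @ticket: ww_acquire ticket used for the reservation
 * @head: list of radeon_cs_reloc entries to validate
 * @ring: ring the command submission is targeted at
 *
 * Reserves every BO on the list and validates it into its preferred domain,
 * falling back to the allowed domains on failure. Buffers already sitting in
 * an allowed domain are left where they are once the per-IB move threshold
 * computed above has been exceeded. Returns 0 on success or a negative error
 * code; the reservation is backed off on failure.
 */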
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_cs_reloc *lobj;
	struct radeon_bo *bo;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	r = ttm_eu_reserve_buffers(ticket, head);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((lobj->allowed_domains & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

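/**
 * radeon_bo_get_surface_reg - assign a surface register to a tiled BO
 * @bo: buffer object that needs a surface register (must be reserved)
 *
 * Finds a free surface register, or steals one from an unpinned BO if all
 * of them are in use, and programs it with the BO's tiling parameters.
 * Returns 0 on success or -ENOMEM when no register can be freed.
 */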
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

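/**
 * radeon_bo_move_notify - TTM callback invoked when a BO changes placement
 * @bo: the TTM buffer object being moved
 * @new_mem: the new memory placement (may be NULL)
 *
 * Drops any surface register, invalidates VM mappings and, when a new
 * placement is given, updates the per-domain memory usage statistics.
 */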
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

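/**
 * radeon_bo_fault_reserve_notify - TTM callback for CPU page faults on a BO
 * @bo: the TTM buffer object being faulted
 *
 * Makes sure a BO that is about to be mapped by the CPU lies in the
 * CPU-visible part of VRAM, moving it into visible VRAM or falling back to
 * GTT if that fails. Returns 0 on success or a negative error code.
 */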
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* hurrah the memory is not visible ! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	rbo->placement.lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

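/**
 * radeon_bo_wait - wait for all pending work on a buffer object
 * @bo: buffer object to wait for
 * @mem_type: optional location to return the BO's current memory type
 * @no_wait: if true, only poll instead of blocking
 *
 * Reserves the BO, waits for its sync object under the fence lock and
 * releases the reservation again. Returns 0 on success or a negative error
 * code.
 */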
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	spin_lock(&bo->tbo.bdev->fence_lock);
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;
	if (bo->tbo.sync_obj)
		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	spin_unlock(&bo->tbo.bdev->fence_lock);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}