/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

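/* Account @bo against the device-wide GTT/VRAM usage counters;
 * @sign is +1 when the BO enters the memory type and -1 when it leaves.
 */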
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

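/* TTM destroy callback: drop the usage accounting, MMU notifier and GEM
 * bookkeeping for the BO, then free the radeon_bo wrapper.
 */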
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
	radeon_mn_unregister(bo);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

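/* Convert a RADEON_GEM_DOMAIN_* mask into a TTM placement list, taking
 * the BO's caching flags (UC/WC/cached) and CPU-access hint into account.
 */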
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM)
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
		    rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c)
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		rbo->placements[i].fpfn = 0;
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM))
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}

	/*
	 * Use two-ended allocation depending on the buffer size to
	 * improve fragmentation quality.
	 * 512kb was measured as the most optimal number.
	 */
	if (!((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
	      (rbo->placements[i].flags & TTM_PL_FLAG_VRAM)) &&
	    rbo->tbo.mem.size > 512 * 1024) {
		for (i = 0; i < c; i++) {
			rbo->placements[i].flags |= TTM_PL_FLAG_TOPDOWN;
		}
	}
}

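/* Allocate a new radeon_bo: initialize the GEM object and the initial
 * placement for @domain, then let TTM allocate the backing storage.
 */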
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel, u32 domain,
		     u32 flags, struct sg_table *sg, struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
	                               RADEON_GEM_DOMAIN_GTT |
	                               RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

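/* Map the whole BO into the kernel address space; the mapping is cached
 * in bo->kptr so repeated calls are cheap.
 */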
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

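/* Drop the caller's reference on the underlying TTM BO and clear the
 * caller's pointer; the BO is destroyed once the last reference is gone.
 */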
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

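/* Pin the BO into @domain, optionally constrained to lie below
 * @max_offset, and return its GPU address. Pinning an already pinned BO
 * just increments the pin count.
 */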
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		unsigned lpfn = 0;

		/* force to pin into visible video ram */
		if (bo->placements[i].flags & TTM_PL_FLAG_VRAM)
			lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			lpfn = bo->rdev->mc.gtt_size >> PAGE_SHIFT; /* ??? */

		if (max_offset)
			lpfn = min(lpfn, (unsigned)(max_offset >> PAGE_SHIFT));

		bo->placements[i].lpfn = lpfn;
		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}
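
/* Drop one pin reference; once the count reaches zero the BO is made
 * evictable again and revalidated.
 */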
int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

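/* Called at teardown: forcibly release GEM objects that userspace never
 * freed, complaining about each one.
 */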
void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		mutex_lock(&rdev->ddev->struct_mutex);
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_unreference(&bo->gem_base);
		mutex_unlock(&rdev->ddev->struct_mutex);
	}
}

int radeon_bo_init(struct radeon_device *rdev)
{
	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *         __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

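/* Reserve and validate all BOs on the CS reloc list, moving them into
 * their preferred domains as long as the per-IB move budget allows it.
 */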
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_cs_reloc *lobj;
	struct radeon_bo *bo;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	r = ttm_eu_reserve_buffers(ticket, head, true);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}
	return 0;
}

int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
			     struct vm_area_struct *vma)
{
	return ttm_fbdev_mmap(vma, &bo->tbo);
}

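/* Find a free hardware surface register for this BO's tiling setup,
 * stealing one from an unpinned BO if necessary, and program it.
 */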
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

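/* Fault handler callback: if the BO lies outside the CPU-visible part of
 * VRAM, move it into the visible window (or fall back to GTT).
 */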
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size;
	int r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* hurrah the memory is not visible ! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	rbo->placements[0].lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, NULL);
	if (unlikely(r != 0))
		return r;
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;

	r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}