/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <linux/list.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <drm/drm_cache.h>
#include "radeon.h"
#include "radeon_trace.h"


int radeon_ttm_init(struct radeon_device *rdev);
void radeon_ttm_fini(struct radeon_device *rdev);
static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);

/*
 * To exclude mutual BO access we rely on bo_reserve exclusion, as all
 * functions call it.
 */

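/**
 * radeon_update_memory_usage - track per-domain memory usage
 *
 * @bo: buffer object whose size is being accounted
 * @mem_type: TTM memory type the BO currently occupies
 * @sign: +1 to add the BO size to the counter, -1 to subtract it
 *
 * Adjusts the device-wide GTT or VRAM usage counter by the BO size.
 */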
static void radeon_update_memory_usage(struct radeon_bo *bo,
				       unsigned mem_type, int sign)
{
	struct radeon_device *rdev = bo->rdev;
	u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;

	switch (mem_type) {
	case TTM_PL_TT:
		if (sign > 0)
			atomic64_add(size, &rdev->gtt_usage);
		else
			atomic64_sub(size, &rdev->gtt_usage);
		break;
	case TTM_PL_VRAM:
		if (sign > 0)
			atomic64_add(size, &rdev->vram_usage);
		else
			atomic64_sub(size, &rdev->vram_usage);
		break;
	}
}

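/**
 * radeon_ttm_bo_destroy - TTM destroy callback for radeon BOs
 *
 * @tbo: TTM buffer object that is being destroyed
 *
 * Removes the BO from the memory usage statistics and the GEM object
 * list, drops its surface register, releases the GEM object and frees
 * the radeon_bo structure.
 */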
static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct radeon_bo *bo;

	bo = container_of(tbo, struct radeon_bo, tbo);

	radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);

	mutex_lock(&bo->rdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&bo->rdev->gem.mutex);
	radeon_bo_clear_surface_reg(bo);
	WARN_ON_ONCE(!list_empty(&bo->va));
	drm_gem_object_release(&bo->gem_base);
	kfree(bo);
}

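/**
 * radeon_ttm_bo_is_radeon_bo - check whether a TTM BO is a radeon BO
 *
 * @bo: TTM buffer object to check
 *
 * Returns true if the object uses the radeon destroy callback and is
 * therefore embedded in a struct radeon_bo.
 */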
bool radeon_ttm_bo_is_radeon_bo(struct ttm_buffer_object *bo)
{
	if (bo->destroy == &radeon_ttm_bo_destroy)
		return true;
	return false;
}

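/**
 * radeon_ttm_placement_from_domain - build a TTM placement list
 *
 * @rbo: radeon BO to fill the placement list for
 * @domain: mask of RADEON_GEM_DOMAIN_* flags
 *
 * Translates the requested domains and the BO's creation flags into TTM
 * placements, preferring CPU-inaccessible VRAM for BOs that don't need
 * CPU access and falling back to system memory when no domain is given.
 */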
void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
{
	u32 c = 0, i;

	rbo->placement.placement = rbo->placements;
	rbo->placement.busy_placement = rbo->placements;
	if (domain & RADEON_GEM_DOMAIN_VRAM) {
		/* Try placing BOs which don't need CPU access outside of the
		 * CPU accessible part of VRAM
		 */
		if ((rbo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    rbo->rdev->mc.visible_vram_size < rbo->rdev->mc.real_vram_size) {
			rbo->placements[c].fpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
						     TTM_PL_FLAG_UNCACHED |
						     TTM_PL_FLAG_VRAM;
		}

		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_FLAG_WC |
					     TTM_PL_FLAG_UNCACHED |
					     TTM_PL_FLAG_VRAM;
	}

	if (domain & RADEON_GEM_DOMAIN_GTT) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
			   (rbo->rdev->flags & RADEON_IS_AGP)) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_TT;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_TT;
		}
	}

	if (domain & RADEON_GEM_DOMAIN_CPU) {
		if (rbo->flags & RADEON_GEM_GTT_UC) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;

		} else if ((rbo->flags & RADEON_GEM_GTT_WC) ||
		    rbo->rdev->flags & RADEON_IS_AGP) {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_WC |
				TTM_PL_FLAG_UNCACHED |
				TTM_PL_FLAG_SYSTEM;
		} else {
			rbo->placements[c].fpfn = 0;
			rbo->placements[c++].flags = TTM_PL_FLAG_CACHED |
						     TTM_PL_FLAG_SYSTEM;
		}
	}
	if (!c) {
		rbo->placements[c].fpfn = 0;
		rbo->placements[c++].flags = TTM_PL_MASK_CACHING |
					     TTM_PL_FLAG_SYSTEM;
	}

	rbo->placement.num_placement = c;
	rbo->placement.num_busy_placement = c;

	for (i = 0; i < c; ++i) {
		if ((rbo->flags & RADEON_GEM_CPU_ACCESS) &&
		    (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !rbo->placements[i].fpfn)
			rbo->placements[i].lpfn =
				rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			rbo->placements[i].lpfn = 0;
	}
}

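/**
 * radeon_bo_create - allocate and initialize a radeon buffer object
 *
 * @rdev: radeon device the BO belongs to
 * @size: requested size in bytes, rounded up to a full page
 * @byte_align: byte alignment of the buffer
 * @kernel: true for a kernel-internal BO
 * @domain: initial RADEON_GEM_DOMAIN_* placement mask
 * @flags: RADEON_GEM_* creation flags
 * @sg: optional scatter/gather table for imported buffers
 * @resv: optional reservation object to use
 * @bo_ptr: location where the new BO pointer is stored on success
 *
 * Allocates the GEM/TTM object, masks out caching flags the platform or
 * chip cannot support, and initializes the BO with the placement derived
 * from @domain. Returns 0 on success or a negative error code.
 */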
int radeon_bo_create(struct radeon_device *rdev,
		     unsigned long size, int byte_align, bool kernel,
		     u32 domain, u32 flags, struct sg_table *sg,
		     struct reservation_object *resv,
		     struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
	size_t acc_size;
	int r;

	size = ALIGN(size, PAGE_SIZE);

	if (kernel) {
		type = ttm_bo_type_kernel;
	} else if (sg) {
		type = ttm_bo_type_sg;
	} else {
		type = ttm_bo_type_device;
	}
	*bo_ptr = NULL;

	acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
				       sizeof(struct radeon_bo));

	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	r = drm_gem_object_init(rdev->ddev, &bo->gem_base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->rdev = rdev;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);
	INIT_LIST_HEAD(&bo->va);
	bo->initial_domain = domain & (RADEON_GEM_DOMAIN_VRAM |
				       RADEON_GEM_DOMAIN_GTT |
				       RADEON_GEM_DOMAIN_CPU);

	bo->flags = flags;
	/* PCI GART is always snooped */
	if (!(rdev->flags & RADEON_IS_PCIE))
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
	 */
	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);

#ifdef CONFIG_X86_32
	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
	 */
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
	/* Don't try to enable write-combining when it can't work, or things
	 * may be slow
	 * See https://bugs.freedesktop.org/show_bug.cgi?id=88758
	 */

#warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
	 thanks to write-combining

	if (bo->flags & RADEON_GEM_GTT_WC)
		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
			      "better performance thanks to write-combining\n");
	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
#else
	/* For architectures that don't support WC memory,
	 * mask out the WC flag from the BO
	 */
	if (!drm_arch_can_wc_memory())
		bo->flags &= ~RADEON_GEM_GTT_WC;
#endif

	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible */
	down_read(&rdev->pm.mclk_lock);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, page_align, !kernel, NULL,
			acc_size, sg, resv, &radeon_ttm_bo_destroy);
	up_read(&rdev->pm.mclk_lock);
	if (unlikely(r != 0)) {
		return r;
	}
	*bo_ptr = bo;

	trace_radeon_bo_create(bo);

	return 0;
}

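/**
 * radeon_bo_kmap - map a BO into kernel address space
 *
 * @bo: radeon BO to map
 * @ptr: optional location to store the kernel virtual address
 *
 * Maps the whole BO through TTM and caches the pointer in bo->kptr so
 * repeated calls return the same mapping. Returns 0 on success.
 */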
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr) {
			*ptr = bo->kptr;
		}
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r) {
		return r;
	}
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr) {
		*ptr = bo->kptr;
	}
	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}

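/**
 * radeon_bo_kunmap - unmap a BO previously mapped with radeon_bo_kmap
 *
 * @bo: radeon BO to unmap
 */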
void radeon_bo_kunmap(struct radeon_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->kptr = NULL;
	radeon_bo_check_tiling(bo, 0, 0);
	ttm_bo_kunmap(&bo->kmap);
}

struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
{
	if (bo == NULL)
		return NULL;

	ttm_bo_reference(&bo->tbo);
	return bo;
}

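/**
 * radeon_bo_unref - drop a reference to a radeon BO
 *
 * @bo: pointer to the BO pointer, cleared once the underlying TTM object
 *	reference has been dropped
 */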
void radeon_bo_unref(struct radeon_bo **bo)
{
	struct ttm_buffer_object *tbo;
	struct radeon_device *rdev;

	if ((*bo) == NULL)
		return;
	rdev = (*bo)->rdev;
	tbo = &((*bo)->tbo);
	ttm_bo_unref(&tbo);
	if (tbo == NULL)
		*bo = NULL;
}

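/**
 * radeon_bo_pin_restricted - pin a BO into a domain below an offset limit
 *
 * @bo: radeon BO to pin
 * @domain: RADEON_GEM_DOMAIN_* to pin into
 * @max_offset: highest acceptable GPU offset, or 0 for no restriction
 * @gpu_addr: optional location to return the pinned GPU address
 *
 * Increments the pin count if the BO is already pinned; otherwise marks
 * the placements as non-evictable and validates the BO into @domain.
 * Userptr BOs cannot be pinned, and dma-buf shared BOs cannot be pinned
 * into VRAM.
 */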
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
			     u64 *gpu_addr)
{
	int r, i;

	if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))
		return -EPERM;

	if (bo->pin_count) {
		bo->pin_count++;
		if (gpu_addr)
			*gpu_addr = radeon_bo_gpu_offset(bo);

		if (max_offset != 0) {
			u64 domain_start;

			if (domain == RADEON_GEM_DOMAIN_VRAM)
				domain_start = bo->rdev->mc.vram_start;
			else
				domain_start = bo->rdev->mc.gtt_start;
			WARN_ON_ONCE(max_offset <
				     (radeon_bo_gpu_offset(bo) - domain_start));
		}

		return 0;
	}
	if (bo->prime_shared_count && domain == RADEON_GEM_DOMAIN_VRAM) {
		/* A BO shared as a dma-buf cannot be sensibly migrated to VRAM */
		return -EINVAL;
	}

	radeon_ttm_placement_from_domain(bo, domain);
	for (i = 0; i < bo->placement.num_placement; i++) {
		/* force to pin into visible video ram */
		if ((bo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    !(bo->flags & RADEON_GEM_NO_CPU_ACCESS) &&
		    (!max_offset || max_offset > bo->rdev->mc.visible_vram_size))
			bo->placements[i].lpfn =
				bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
		else
			bo->placements[i].lpfn = max_offset >> PAGE_SHIFT;

		bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT;
	}

	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		bo->pin_count = 1;
		if (gpu_addr != NULL)
			*gpu_addr = radeon_bo_gpu_offset(bo);
		if (domain == RADEON_GEM_DOMAIN_VRAM)
			bo->rdev->vram_pin_size += radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size += radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
	}
	return r;
}

int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr);
}

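/**
 * radeon_bo_unpin - decrement a BO's pin count
 *
 * @bo: radeon BO to unpin
 *
 * When the pin count drops to zero the placements are made evictable
 * again and the BO is re-validated.
 */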
int radeon_bo_unpin(struct radeon_bo *bo)
{
	int r, i;

	if (!bo->pin_count) {
		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++) {
		bo->placements[i].lpfn = 0;
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	}
	r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
	if (likely(r == 0)) {
		if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
			bo->rdev->vram_pin_size -= radeon_bo_size(bo);
		else
			bo->rdev->gart_pin_size -= radeon_bo_size(bo);
	} else {
		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
	}
	return r;
}

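/**
 * radeon_bo_evict_vram - evict all buffer objects from VRAM
 *
 * @rdev: radeon device
 *
 * Asks TTM to move every buffer currently resident in VRAM out of it.
 */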
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
	/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correctly */
	if (0 && (rdev->flags & RADEON_IS_IGP)) {
		if (rdev->mc.igp_sideport_enabled == false)
			/* Useless to evict on IGP chips */
			return 0;
	}
	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
}

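/**
 * radeon_bo_force_delete - force-free leftover GEM objects
 *
 * @rdev: radeon device
 *
 * Warns about and releases any BOs still on the GEM object list.
 */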
void radeon_bo_force_delete(struct radeon_device *rdev)
{
	struct radeon_bo *bo, *n;

	if (list_empty(&rdev->gem.objects)) {
		return;
	}
	dev_err(rdev->dev, "Userspace still has active objects !\n");
	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
			&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
			*((unsigned long *)&bo->gem_base.refcount));
		mutex_lock(&bo->rdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&bo->rdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put_unlocked(&bo->gem_base);
	}
}

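/**
 * radeon_bo_init - initialize buffer object management
 *
 * @rdev: radeon device
 *
 * Sets up write-combined access to the VRAM aperture and initializes TTM.
 */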
int radeon_bo_init(struct radeon_device *rdev)
{
	/* reserve PAT memory space to WC for VRAM */
	arch_io_reserve_memtype_wc(rdev->mc.aper_base,
				   rdev->mc.aper_size);

	/* Add an MTRR for the VRAM */
	if (!rdev->fastfb_working) {
		rdev->mc.vram_mtrr = arch_phys_wc_add(rdev->mc.aper_base,
						      rdev->mc.aper_size);
	}
	DRM_INFO("Detected VRAM RAM=%lluM, BAR=%lluM\n",
		rdev->mc.mc_vram_size >> 20,
		(unsigned long long)rdev->mc.aper_size >> 20);
	DRM_INFO("RAM width %dbits %cDR\n",
			rdev->mc.vram_width, rdev->mc.vram_is_ddr ? 'D' : 'S');
	return radeon_ttm_init(rdev);
}

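/**
 * radeon_bo_fini - tear down buffer object management
 *
 * @rdev: radeon device
 *
 * Shuts down TTM and releases the VRAM aperture MTRR/PAT reservations.
 */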
void radeon_bo_fini(struct radeon_device *rdev)
{
	radeon_ttm_fini(rdev);
	arch_phys_wc_del(rdev->mc.vram_mtrr);
	arch_io_free_memtype_wc(rdev->mc.aper_base, rdev->mc.aper_size);
}

/* Returns how many bytes TTM can move per IB.
 */
static u64 radeon_bo_get_threshold_for_moves(struct radeon_device *rdev)
{
	u64 real_vram_size = rdev->mc.real_vram_size;
	u64 vram_usage = atomic64_read(&rdev->vram_usage);

	/* This function is based on the current VRAM usage.
	 *
	 * - If all of VRAM is free, allow relocating the number of bytes that
	 *   is equal to 1/4 of the size of VRAM for this IB.
	 *
	 * - If more than one half of VRAM is occupied, only allow relocating
	 *   1 MB of data for this IB.
	 *
	 * - From 0 to one half of used VRAM, the threshold decreases
	 *   linearly.
	 *         __________________
	 * 1/4 of -|\               |
	 * VRAM    | \              |
	 *         |  \             |
	 *         |   \            |
	 *         |    \           |
	 *         |     \          |
	 *         |      \         |
	 *         |       \________|1 MB
	 *         |----------------|
	 *    VRAM 0 %             100 %
	 *         used            used
	 *
	 * Note: It's a threshold, not a limit. The threshold must be crossed
	 * for buffer relocations to stop, so any buffer of an arbitrary size
	 * can be moved as long as the threshold isn't crossed before
	 * the relocation takes place. We don't want to disable buffer
	 * relocations completely.
	 *
	 * The idea is that buffers should be placed in VRAM at creation time
	 * and TTM should only do a minimum number of relocations during
	 * command submission. In practice, you need to submit at least
	 * a dozen IBs to move all buffers to VRAM if they are in GTT.
	 *
	 * Also, things can get pretty crazy under memory pressure and actual
	 * VRAM usage can change a lot, so playing safe even at 50% does
	 * consistently increase performance.
	 */

	u64 half_vram = real_vram_size >> 1;
	u64 half_free_vram = vram_usage >= half_vram ? 0 : half_vram - vram_usage;
	u64 bytes_moved_threshold = half_free_vram >> 1;
	return max(bytes_moved_threshold, 1024*1024ull);
}

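/**
 * radeon_bo_list_validate - reserve and place all BOs of a command submission
 *
 * @rdev: radeon device
 * @ticket: ww_mutex acquire context used for the reservation
 * @head: list of radeon_bo_list entries to validate
 * @ring: ring the command submission is targeting
 *
 * Reserves every BO on the list and validates it into its preferred
 * domain, falling back to the allowed domains on failure and limiting
 * the amount of data moved per IB.
 */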
int radeon_bo_list_validate(struct radeon_device *rdev,
			    struct ww_acquire_ctx *ticket,
			    struct list_head *head, int ring)
{
	struct radeon_bo_list *lobj;
	struct list_head duplicates;
	int r;
	u64 bytes_moved = 0, initial_bytes_moved;
	u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);

	INIT_LIST_HEAD(&duplicates);
	r = ttm_eu_reserve_buffers(ticket, head, true, &duplicates);
	if (unlikely(r != 0)) {
		return r;
	}

	list_for_each_entry(lobj, head, tv.head) {
		struct radeon_bo *bo = lobj->robj;
		if (!bo->pin_count) {
			u32 domain = lobj->prefered_domains;
			u32 allowed = lobj->allowed_domains;
			u32 current_domain =
				radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

			/* Check if this buffer will be moved and don't move it
			 * if we have moved too many buffers for this IB already.
			 *
			 * Note that this allows moving at least one buffer of
			 * any size, because it doesn't take the current "bo"
			 * into account. We don't want to disallow buffer moves
			 * completely.
			 */
			if ((allowed & current_domain) != 0 &&
			    (domain & current_domain) == 0 && /* will be moved */
			    bytes_moved > bytes_moved_threshold) {
				/* don't move it */
				domain = current_domain;
			}

		retry:
			radeon_ttm_placement_from_domain(bo, domain);
			if (ring == R600_RING_TYPE_UVD_INDEX)
				radeon_uvd_force_into_uvd_segment(bo, allowed);

			initial_bytes_moved = atomic64_read(&rdev->num_bytes_moved);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
			bytes_moved += atomic64_read(&rdev->num_bytes_moved) -
				       initial_bytes_moved;

			if (unlikely(r)) {
				if (r != -ERESTARTSYS &&
				    domain != lobj->allowed_domains) {
					domain = lobj->allowed_domains;
					goto retry;
				}
				ttm_eu_backoff_reservation(ticket, head);
				return r;
			}
		}
		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
		lobj->tiling_flags = bo->tiling_flags;
	}

	list_for_each_entry(lobj, &duplicates, tv.head) {
		lobj->gpu_offset = radeon_bo_gpu_offset(lobj->robj);
		lobj->tiling_flags = lobj->robj->tiling_flags;
	}

	return 0;
}

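/**
 * radeon_bo_get_surface_reg - assign a surface register to a tiled BO
 *
 * @bo: radeon BO that needs a surface register
 *
 * Reuses the BO's existing register if it has one, otherwise picks a free
 * register or steals one from an unpinned BO, and programs it with the
 * BO's tiling parameters.
 */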
int radeon_bo_get_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;
	struct radeon_bo *old_object;
	int steal;
	int i;

	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!bo->tiling_flags)
		return 0;

	if (bo->surface_reg >= 0) {
		reg = &rdev->surface_regs[bo->surface_reg];
		i = bo->surface_reg;
		goto out;
	}

	steal = -1;
	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {

		reg = &rdev->surface_regs[i];
		if (!reg->bo)
			break;

		old_object = reg->bo;
		if (old_object->pin_count == 0)
			steal = i;
	}

	/* if we are all out */
	if (i == RADEON_GEM_MAX_SURFACES) {
		if (steal == -1)
			return -ENOMEM;
		/* find someone with a surface reg and nuke their BO */
		reg = &rdev->surface_regs[steal];
		old_object = reg->bo;
		/* blow away the mapping */
		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
		ttm_bo_unmap_virtual(&old_object->tbo);
		old_object->surface_reg = -1;
		i = steal;
	}

	bo->surface_reg = i;
	reg->bo = bo;

out:
	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
			       bo->tbo.mem.start << PAGE_SHIFT,
			       bo->tbo.num_pages << PAGE_SHIFT);
	return 0;
}

static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_surface_reg *reg;

	if (bo->surface_reg == -1)
		return;

	reg = &rdev->surface_regs[bo->surface_reg];
	radeon_clear_surface_reg(rdev, bo->surface_reg);

	reg->bo = NULL;
	bo->surface_reg = -1;
}

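/**
 * radeon_bo_set_tiling_flags - set tiling parameters for a BO
 *
 * @bo: radeon BO to update
 * @tiling_flags: RADEON_TILING_* flags, including the evergreen bank and
 *	tile split fields
 * @pitch: surface pitch
 *
 * Validates the evergreen-specific fields and stores the new values while
 * the BO is reserved.
 */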
int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
				uint32_t tiling_flags, uint32_t pitch)
{
	struct radeon_device *rdev = bo->rdev;
	int r;

	if (rdev->family >= CHIP_CEDAR) {
		unsigned bankw, bankh, mtaspect, tilesplit, stilesplit;

		bankw = (tiling_flags >> RADEON_TILING_EG_BANKW_SHIFT) & RADEON_TILING_EG_BANKW_MASK;
		bankh = (tiling_flags >> RADEON_TILING_EG_BANKH_SHIFT) & RADEON_TILING_EG_BANKH_MASK;
		mtaspect = (tiling_flags >> RADEON_TILING_EG_MACRO_TILE_ASPECT_SHIFT) & RADEON_TILING_EG_MACRO_TILE_ASPECT_MASK;
		tilesplit = (tiling_flags >> RADEON_TILING_EG_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_TILE_SPLIT_MASK;
		stilesplit = (tiling_flags >> RADEON_TILING_EG_STENCIL_TILE_SPLIT_SHIFT) & RADEON_TILING_EG_STENCIL_TILE_SPLIT_MASK;
		switch (bankw) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (bankh) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		switch (mtaspect) {
		case 0:
		case 1:
		case 2:
		case 4:
		case 8:
			break;
		default:
			return -EINVAL;
		}
		if (tilesplit > 6) {
			return -EINVAL;
		}
		if (stilesplit > 6) {
			return -EINVAL;
		}
	}
	r = radeon_bo_reserve(bo, false);
	if (unlikely(r != 0))
		return r;
	bo->tiling_flags = tiling_flags;
	bo->pitch = pitch;
	radeon_bo_unreserve(bo);
	return 0;
}

void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
				uint32_t *tiling_flags,
				uint32_t *pitch)
{
	lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (tiling_flags)
		*tiling_flags = bo->tiling_flags;
	if (pitch)
		*pitch = bo->pitch;
}

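/**
 * radeon_bo_check_tiling - update surface register state for a BO
 *
 * @bo: radeon BO to check
 * @has_moved: true if the BO has just been moved
 * @force_drop: true to drop the surface register unconditionally
 *
 * Acquires or releases the BO's surface register depending on whether
 * the BO is tiled and currently resident in VRAM.
 */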
int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
				bool force_drop)
{
	if (!force_drop)
		lockdep_assert_held(&bo->tbo.resv->lock.base);

	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
		return 0;

	if (force_drop) {
		radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
		if (!has_moved)
			return 0;

		if (bo->surface_reg >= 0)
			radeon_bo_clear_surface_reg(bo);
		return 0;
	}

	if ((bo->surface_reg >= 0) && !has_moved)
		return 0;

	return radeon_bo_get_surface_reg(bo);
}

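/**
 * radeon_bo_move_notify - TTM move notification callback
 *
 * @bo: TTM BO that is being moved
 * @evict: true if the move is an eviction
 * @new_mem: new memory placement (may be NULL)
 *
 * Drops the surface register, invalidates VM mappings and updates the
 * per-domain memory usage statistics.
 */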
void radeon_bo_move_notify(struct ttm_buffer_object *bo,
			   bool evict,
			   struct ttm_mem_reg *new_mem)
{
	struct radeon_bo *rbo;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return;

	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 1);
	radeon_vm_bo_invalidate(rbo->rdev, rbo);

	/* update statistics */
	if (!new_mem)
		return;

	radeon_update_memory_usage(rbo, bo->mem.mem_type, -1);
	radeon_update_memory_usage(rbo, new_mem->mem_type, 1);
}

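/**
 * radeon_bo_fault_reserve_notify - TTM fault callback for CPU access
 *
 * @bo: TTM BO that is about to be faulted into the CPU address space
 *
 * Makes sure a BO that is about to be CPU-mapped lies within the CPU
 * visible part of VRAM, moving it into visible VRAM or GTT if necessary.
 * Pinned BOs outside the visible range cannot be moved and fail with
 * -EINVAL.
 */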
int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct radeon_device *rdev;
	struct radeon_bo *rbo;
	unsigned long offset, size, lpfn;
	int i, r;

	if (!radeon_ttm_bo_is_radeon_bo(bo))
		return 0;
	rbo = container_of(bo, struct radeon_bo, tbo);
	radeon_bo_check_tiling(rbo, 0, 0);
	rdev = rbo->rdev;
	if (bo->mem.mem_type != TTM_PL_VRAM)
		return 0;

	size = bo->mem.num_pages << PAGE_SHIFT;
	offset = bo->mem.start << PAGE_SHIFT;
	if ((offset + size) <= rdev->mc.visible_vram_size)
		return 0;

	/* Can't move a pinned BO to visible VRAM */
	if (rbo->pin_count > 0)
		return -EINVAL;

	/* hurrah the memory is not visible ! */
	radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM);
	lpfn = rdev->mc.visible_vram_size >> PAGE_SHIFT;
	for (i = 0; i < rbo->placement.num_placement; i++) {
		/* Force into visible VRAM */
		if ((rbo->placements[i].flags & TTM_PL_FLAG_VRAM) &&
		    (!rbo->placements[i].lpfn || rbo->placements[i].lpfn > lpfn))
			rbo->placements[i].lpfn = lpfn;
	}
	r = ttm_bo_validate(bo, &rbo->placement, false, false);
	if (unlikely(r == -ENOMEM)) {
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		return ttm_bo_validate(bo, &rbo->placement, false, false);
	} else if (unlikely(r != 0)) {
		return r;
	}

	offset = bo->mem.start << PAGE_SHIFT;
	/* this should never happen */
	if ((offset + size) > rdev->mc.visible_vram_size)
		return -EINVAL;

	return 0;
}

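/**
 * radeon_bo_wait - wait for all GPU use of a BO to finish
 *
 * @bo: radeon BO to wait for
 * @mem_type: optional location to return the BO's current memory type
 * @no_wait: if true, return -EBUSY instead of blocking when the BO is busy
 *
 * Reserves the BO, waits for pending work on it and unreserves it again.
 */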
int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait)
{
	int r;

	r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
	if (unlikely(r != 0))
		return r;
	if (mem_type)
		*mem_type = bo->tbo.mem.mem_type;

	r = ttm_bo_wait(&bo->tbo, true, no_wait);
	ttm_bo_unreserve(&bo->tbo);
	return r;
}

/**
 * radeon_bo_fence - add fence to buffer object
 *
 * @bo: buffer object in question
 * @fence: fence to add
 * @shared: true if fence should be added shared
 *
 */
void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence,
		     bool shared)
{
	struct reservation_object *resv = bo->tbo.resv;

	if (shared)
		reservation_object_add_shared_fence(resv, &fence->base);
	else
		reservation_object_add_excl_fence(resv, &fence->base);
}