/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "radeon_reg.h"
#include "radeon.h"

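/*
 * mmap() offsets at or above this mark address TTM buffer objects;
 * anything below falls through to the legacy drm_mmap() path.
 */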
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);

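/*
 * Recover the radeon_device from a ttm_bo_device by walking back up
 * the embedding chain: the bdev is embedded in radeon_mman, which is
 * in turn embedded in radeon_device.
 */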
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}


/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

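/*
 * Reference the TTM memory accounting and buffer object globals.
 * These objects are shared between all devices; drm_global_item_ref()
 * only initializes them for the first device that takes a reference.
 */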
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct drm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev);

static struct ttm_backend*
radeon_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_backend_init(bdev, rdev->ddev->agp->bridge);
	} else
#endif
	{
		return radeon_ttm_backend_create(rdev);
	}
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

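/*
 * Describe each placement to TTM: cacheable system pages, the GTT
 * (individually mapped pages, unless an AGP aperture is usable) and
 * on-board VRAM, which is a fixed, write-combined aperture.
 */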
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

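/*
 * Pick where an evicted buffer should go: VRAM contents are evicted
 * to GTT while the CP is running, so the blit copy can be used, and
 * straight to system memory otherwise.
 */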
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct radeon_bo *rbo;
	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->fpfn = 0;
		placement->lpfn = 0;
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (!rbo->rdev->cp.ready)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

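/*
 * A "null" move: no data has to be copied, so simply retarget the
 * buffer object at the new memory region.
 */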
static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

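/*
 * Move a buffer with the CP: translate both mm_node offsets into GPU
 * addresses, kick off an asic copy and fence it so TTM can tear down
 * the old placement once the blit has finished.
 */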
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, int no_wait_reserve, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	r = radeon_fence_create(rdev, &fence);
	if (unlikely(r)) {
		return r;
	}
	old_start = old_mem->mm_node->start << PAGE_SHIFT;
	new_start = new_mem->mm_node->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		return -EINVAL;
	}
	if (!rdev->cp.ready) {
		DRM_ERROR("Trying to move memory with CP turned off.\n");
		return -EINVAL;
	}
	r = radeon_copy(rdev, old_start, new_start, new_mem->num_pages, fence);
	if (unlikely(r)) {
		/* the copy was never queued, drop the fence and bail */
		radeon_fence_unref(&fence);
		return r;
	}
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
				      evict, no_wait_reserve, no_wait_gpu, new_mem);
	radeon_fence_unref(&fence);
	return r;
}

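/*
 * VRAM to system move, done in two hops: blit the data into a
 * temporary GTT placement with the CP, then let TTM unbind it into
 * plain system pages.
 */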
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_reserve, bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	u32 placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_reserve, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return r;
}

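/*
 * System to VRAM move, the mirror image: bind the pages into a
 * temporary GTT placement first, then blit them into VRAM with the CP.
 */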
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_reserve, bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	u32 placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return r;
}

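/*
 * Dispatch a buffer move: trivial system<->GTT transitions only need a
 * (re)bind, everything else is blitted by the CP when possible and
 * falls back to a CPU memcpy otherwise.
 */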
static int radeon_bo_move(struct ttm_buffer_object *bo,
			bool evict, bool interruptible,
			bool no_wait_reserve, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->cp.ready || rdev->asic->copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					no_wait_reserve, no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					    no_wait_reserve, no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
	}
	return r;
}

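/*
 * Fill in the bus placement (base, offset, iomem) that TTM needs to
 * map a memory region for CPU access, and reject VRAM placements that
 * fall outside the CPU-visible aperture.
 */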
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

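/*
 * TTM sync objects are backed by radeon fences; these thin wrappers
 * adapt the fence API to the driver callbacks TTM expects.
 */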
static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
				bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
};

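/*
 * Bring up the memory manager: reference the TTM globals, initialize
 * the BO device, create the VRAM and GTT pools, pin a 256KB buffer
 * for the stolen VGA memory and register the debugfs files.
 */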
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other users of the address space, so set it to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver (%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
				rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
				RADEON_GEM_DOMAIN_VRAM,
				&rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned)(rdev->mc.real_vram_size / (1024 * 1024)));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
				rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	if (rdev->stollen_vga_memory) {
		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

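/*
 * Page faults on TTM mappings are forwarded to TTM's generic fault
 * handler, but with rdev->vram_mutex held so the fault cannot race
 * against code that is moving buffers around in VRAM.
 */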
static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	rdev = radeon_get_rdev(bo->bdev);
	mutex_lock(&rdev->vram_mutex);
	r = ttm_vm_ops->fault(vma, vmf);
	mutex_unlock(&rdev->vram_mutex);
	return r;
}

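/*
 * mmap() entry point: low offsets are legacy maps handled by the drm
 * core, everything else is a TTM object.  On the first TTM mapping the
 * generic vm_ops are copied and their fault handler is replaced with
 * the wrapper above.
 */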
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = (struct drm_file *)filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}


/*
 * TTM backend functions.
 */
struct radeon_ttm_backend {
	struct ttm_backend		backend;
	struct radeon_device		*rdev;
	unsigned long			num_pages;
	struct page			**pages;
	struct page			*dummy_read_page;
	bool				populated;
	bool				bound;
	unsigned			offset;
};

static int radeon_ttm_backend_populate(struct ttm_backend *backend,
				       unsigned long num_pages,
				       struct page **pages,
				       struct page *dummy_read_page)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = pages;
	gtt->num_pages = num_pages;
	gtt->dummy_read_page = dummy_read_page;
	gtt->populated = true;
	return 0;
}

static void radeon_ttm_backend_clear(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
}


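/*
 * Bind the previously populated pages into the GART at the offset
 * TTM chose for this memory region.
 */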
static int radeon_ttm_backend_bind(struct ttm_backend *backend,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_backend *gtt;
	int r;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	gtt->offset = bo_mem->mm_node->start << PAGE_SHIFT;
	if (!gtt->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     gtt->num_pages, bo_mem, backend);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     gtt->num_pages, gtt->pages);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  gtt->num_pages, gtt->offset);
		return r;
	}
	gtt->bound = true;
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	radeon_gart_unbind(gtt->rdev, gtt->offset, gtt->num_pages);
	gtt->bound = false;
	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_backend *backend)
{
	struct radeon_ttm_backend *gtt;

	gtt = container_of(backend, struct radeon_ttm_backend, backend);
	if (gtt->bound) {
		radeon_ttm_backend_unbind(backend);
	}
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.populate = &radeon_ttm_backend_populate,
	.clear = &radeon_ttm_backend_clear,
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
{
	struct radeon_ttm_backend *gtt;

	gtt = kzalloc(sizeof(struct radeon_ttm_backend), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->backend.bdev = &rdev->mman.bdev;
	gtt->backend.flags = 0;
	gtt->backend.func = &radeon_backend_func;
	gtt->rdev = rdev;
	gtt->pages = NULL;
	gtt->num_pages = 0;
	gtt->dummy_read_page = NULL;
	gtt->populated = false;
	gtt->bound = false;
	return &gtt->backend;
}

#define RADEON_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
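/*
 * Dump one drm_mm allocator (VRAM or GTT) under the LRU lock; the
 * allocator to dump is passed through the debugfs entry's data field.
 */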
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}
#endif

static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+1];
	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+1][32];
	unsigned i;

	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
		else
			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
		radeon_mem_types_list[i].driver_features = 0;
		if (i == 0)
			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_VRAM].manager;
		else
			radeon_mem_types_list[i].data = &rdev->mman.bdev.man[TTM_PL_TT].manager;

	}
	/* Add ttm page pool to debugfs */
	sprintf(radeon_mem_types_names[i], "ttm_page_pool");
	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
	radeon_mem_types_list[i].driver_features = 0;
	radeon_mem_types_list[i].data = NULL;
	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, RADEON_DEBUGFS_MEM_TYPES+1);

#endif
	return 0;
}