/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *    Dave Airlie
 */
#include <ttm/ttm_bo_api.h>
#include <ttm/ttm_bo_driver.h>
#include <ttm/ttm_placement.h>
#include <ttm/ttm_module.h>
#include <ttm/ttm_page_alloc.h>
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "radeon_reg.h"
#include "radeon.h"

#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)

static int radeon_ttm_debugfs_init(struct radeon_device *rdev);

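/*
 * Walk back from the embedded ttm_bo_device to the radeon_device that
 * owns it, via the radeon_mman it is nested in.
 */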
static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
{
	struct radeon_mman *mman;
	struct radeon_device *rdev;

	mman = container_of(bdev, struct radeon_mman, bdev);
	rdev = container_of(mman, struct radeon_device, mman);
	return rdev;
}


/*
 * Global memory.
 */
static int radeon_ttm_mem_global_init(struct drm_global_reference *ref)
{
	return ttm_mem_global_init(ref->object);
}

static void radeon_ttm_mem_global_release(struct drm_global_reference *ref)
{
	ttm_mem_global_release(ref->object);
}

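/*
 * Grab device-local references on the global TTM memory-accounting and
 * BO state; drm_global_item_ref() initializes each item on first use
 * and tears it down again when the last reference is dropped.
 */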
static int radeon_ttm_global_init(struct radeon_device *rdev)
{
	struct drm_global_reference *global_ref;
	int r;

	rdev->mman.mem_global_referenced = false;
	global_ref = &rdev->mman.mem_global_ref;
	global_ref->global_type = DRM_GLOBAL_TTM_MEM;
	global_ref->size = sizeof(struct ttm_mem_global);
	global_ref->init = &radeon_ttm_mem_global_init;
	global_ref->release = &radeon_ttm_mem_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM memory accounting "
			  "subsystem.\n");
		return r;
	}

	rdev->mman.bo_global_ref.mem_glob =
		rdev->mman.mem_global_ref.object;
	global_ref = &rdev->mman.bo_global_ref.ref;
	global_ref->global_type = DRM_GLOBAL_TTM_BO;
	global_ref->size = sizeof(struct ttm_bo_global);
	global_ref->init = &ttm_bo_global_init;
	global_ref->release = &ttm_bo_global_release;
	r = drm_global_item_ref(global_ref);
	if (r != 0) {
		DRM_ERROR("Failed setting up TTM BO subsystem.\n");
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		return r;
	}

	rdev->mman.mem_global_referenced = true;
	return 0;
}

static void radeon_ttm_global_fini(struct radeon_device *rdev)
{
	if (rdev->mman.mem_global_referenced) {
		drm_global_item_unref(&rdev->mman.bo_global_ref.ref);
		drm_global_item_unref(&rdev->mman.mem_global_ref);
		rdev->mman.mem_global_referenced = false;
	}
}

static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	return 0;
}

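/*
 * Describe each memory pool to TTM: plain system RAM, GTT (GART-bound
 * system pages, or the AGP aperture when AGP is active) and on-board
 * VRAM, together with the caching modes each pool supports.
 */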
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
				struct ttm_mem_type_manager *man)
{
	struct radeon_device *rdev;

	rdev = radeon_get_rdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.gtt_start;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			if (!(drm_core_has_AGP(rdev->ddev) && rdev->ddev->agp)) {
				DRM_ERROR("AGP is not enabled for memory type %u\n",
					  (unsigned)type);
				return -EINVAL;
			}
			if (!rdev->ddev->agp->cant_use_aperture)
				man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		}
#endif
		break;
	case TTM_PL_VRAM:
		/* "On-card" video ram */
		man->func = &ttm_bo_manager_func;
		man->gpu_offset = rdev->mc.vram_start;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

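/*
 * Choose where an evicted buffer goes: VRAM contents are moved out to
 * GTT while the GFX ring is up (so the GPU can blit them), otherwise
 * straight to cacheable system memory.
 */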
static void radeon_evict_flags(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	struct radeon_bo *rbo;
	static u32 placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;

	if (!radeon_ttm_bo_is_radeon_bo(bo)) {
		placement->fpfn = 0;
		placement->lpfn = 0;
		placement->placement = &placements;
		placement->busy_placement = &placements;
		placement->num_placement = 1;
		placement->num_busy_placement = 1;
		return;
	}
	rbo = container_of(bo, struct radeon_bo, tbo);
	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		if (rbo->rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready == false)
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
		else
			radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
		break;
	case TTM_PL_TT:
	default:
		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
	}
	*placement = rbo->placement;
}

static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static void radeon_move_null(struct ttm_buffer_object *bo,
			     struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	BUG_ON(old_mem->mm_node != NULL);
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;
}

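/*
 * Move a buffer with the GPU copy ring: translate both placements to
 * MC bus addresses, make the copy ring wait for any fence another ring
 * holds on the buffer, then blit and fence the new location.
 */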
static int radeon_move_blit(struct ttm_buffer_object *bo,
			bool evict, int no_wait_reserve, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem,
			struct ttm_mem_reg *old_mem)
{
	struct radeon_device *rdev;
	uint64_t old_start, new_start;
	struct radeon_fence *fence, *old_fence;
	struct radeon_semaphore *sem = NULL;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	r = radeon_fence_create(rdev, &fence, radeon_copy_ring_index(rdev));
	if (unlikely(r)) {
		return r;
	}
	old_start = old_mem->start << PAGE_SHIFT;
	new_start = new_mem->start << PAGE_SHIFT;

	switch (old_mem->mem_type) {
	case TTM_PL_VRAM:
		old_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		old_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", old_mem->mem_type);
		radeon_fence_unref(&fence);
		return -EINVAL;
	}
	switch (new_mem->mem_type) {
	case TTM_PL_VRAM:
		new_start += rdev->mc.vram_start;
		break;
	case TTM_PL_TT:
		new_start += rdev->mc.gtt_start;
		break;
	default:
		DRM_ERROR("Unknown placement %d\n", new_mem->mem_type);
		radeon_fence_unref(&fence);
		return -EINVAL;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready) {
		DRM_ERROR("Trying to move memory with ring turned off.\n");
		radeon_fence_unref(&fence);
		return -EINVAL;
	}

	BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0);

	/* sync other rings */
	old_fence = bo->sync_obj;
	if (old_fence && old_fence->ring != fence->ring
	    && !radeon_fence_signaled(old_fence)) {
		bool sync_to_ring[RADEON_NUM_RINGS] = { };
		sync_to_ring[old_fence->ring] = true;

		r = radeon_semaphore_create(rdev, &sem);
		if (r) {
			radeon_fence_unref(&fence);
			return r;
		}

		r = radeon_semaphore_sync_rings(rdev, sem,
						sync_to_ring, fence->ring);
		if (r) {
			radeon_semaphore_free(rdev, sem, NULL);
			radeon_fence_unref(&fence);
			return r;
		}
	}

	r = radeon_copy(rdev, old_start, new_start,
			new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE), /* GPU pages */
			fence);
	/* FIXME: handle copy error */
	r = ttm_bo_move_accel_cleanup(bo, (void *)fence, NULL,
				      evict, no_wait_reserve, no_wait_gpu, new_mem);
	radeon_semaphore_free(rdev, sem, fence);
	radeon_fence_unref(&fence);
	return r;
}

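/*
 * VRAM -> system move done in two hops: blit into a temporary GTT
 * placement first (the GPU can only copy between MC-addressable
 * domains), then let TTM move the now-GTT-resident buffer to system RAM.
 */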
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_reserve, bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	u32 placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_reserve, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

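/*
 * System -> VRAM move, mirroring the path above: bind the pages into a
 * temporary GTT placement first, then blit them into VRAM.
 */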
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_reserve, bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	u32 placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait_reserve, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}

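/*
 * Top-level move hook: GTT<->system transitions only need a (re)bind,
 * GPU blits handle the rest while the copy ring is up, and anything
 * that still fails falls back to a CPU memcpy.
 */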
static int radeon_bo_move(struct ttm_buffer_object *bo,
			bool evict, bool interruptible,
			bool no_wait_reserve, bool no_wait_gpu,
			struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
	    rdev->asic->copy.copy == NULL) {
		/* use memcpy */
		goto memcpy;
	}

	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					no_wait_reserve, no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					    no_wait_reserve, no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu, new_mem, old_mem);
	}

	if (r) {
memcpy:
		r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
	}
	return r;
}

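/*
 * Tell TTM how a region may be mapped for CPU access: system memory
 * needs no setup, AGP maps through the aperture, and VRAM maps through
 * the PCI BAR provided the region lies inside the visible window.
 */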
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct radeon_device *rdev = radeon_get_rdev(bdev);

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* system memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (rdev->flags & RADEON_IS_AGP) {
			/* RADEON_IS_AGP is set only if AGP is active */
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = rdev->mc.agp_base;
			mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		/* check if it's visible */
		if ((mem->bus.offset + mem->bus.size) > rdev->mc.visible_vram_size)
			return -EINVAL;
		mem->bus.base = rdev->mc.aper_base;
		mem->bus.is_iomem = true;
#ifdef __alpha__
		/*
		 * Alpha: use bus.addr to hold the ioremap() return,
		 * so we can modify bus.base below.
		 */
		if (mem->placement & TTM_PL_FLAG_WC)
			mem->bus.addr =
				ioremap_wc(mem->bus.base + mem->bus.offset,
					   mem->bus.size);
		else
			mem->bus.addr =
				ioremap_nocache(mem->bus.base + mem->bus.offset,
						mem->bus.size);

		/*
		 * Alpha: Use just the bus offset plus
		 * the hose/domain memory base for bus.base.
		 * It then can be used to build PTEs for VRAM
		 * access, as done in ttm_bo_vm_fault().
		 */
		mem->bus.base = (mem->bus.base & 0x0ffffffffUL) +
			rdev->ddev->hose->dense_mem_base;
#endif
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void radeon_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
				bool lazy, bool interruptible)
{
	return radeon_fence_wait((struct radeon_fence *)sync_obj, interruptible);
}

static int radeon_sync_obj_flush(void *sync_obj, void *sync_arg)
{
	return 0;
}

static void radeon_sync_obj_unref(void **sync_obj)
{
	radeon_fence_unref((struct radeon_fence **)sync_obj);
}

static void *radeon_sync_obj_ref(void *sync_obj)
{
	return radeon_fence_ref((struct radeon_fence *)sync_obj);
}

static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
{
	return radeon_fence_signaled((struct radeon_fence *)sync_obj);
}

/*
 * TTM backend functions.
 */
struct radeon_ttm_tt {
	struct ttm_dma_tt		ttm;
	struct radeon_device		*rdev;
	u64				offset;
};

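/* Bind the backing pages into the GART at the buffer's GTT offset. */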
static int radeon_ttm_backend_bind(struct ttm_tt *ttm,
				   struct ttm_mem_reg *bo_mem)
{
	struct radeon_ttm_tt *gtt = (void*)ttm;
	int r;

	gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
	if (!ttm->num_pages) {
		WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n",
		     ttm->num_pages, bo_mem, ttm);
	}
	r = radeon_gart_bind(gtt->rdev, gtt->offset,
			     ttm->num_pages, ttm->pages, gtt->ttm.dma_address);
	if (r) {
		DRM_ERROR("failed to bind %lu pages at 0x%08X\n",
			  ttm->num_pages, (unsigned)gtt->offset);
		return r;
	}
	return 0;
}

static int radeon_ttm_backend_unbind(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	radeon_gart_unbind(gtt->rdev, gtt->offset, ttm->num_pages);
	return 0;
}

static void radeon_ttm_backend_destroy(struct ttm_tt *ttm)
{
	struct radeon_ttm_tt *gtt = (void *)ttm;

	ttm_dma_tt_fini(&gtt->ttm);
	kfree(gtt);
}

static struct ttm_backend_func radeon_backend_func = {
	.bind = &radeon_ttm_backend_bind,
	.unbind = &radeon_ttm_backend_unbind,
	.destroy = &radeon_ttm_backend_destroy,
};

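/*
 * Allocate per-buffer TTM state: AGP boards use the generic AGP
 * backend, everything else gets a DMA-aware ttm_tt wired to the GART
 * bind/unbind callbacks above.
 */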
struct ttm_tt *radeon_ttm_tt_create(struct ttm_bo_device *bdev,
				    unsigned long size, uint32_t page_flags,
				    struct page *dummy_read_page)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt;

	rdev = radeon_get_rdev(bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_create(bdev, rdev->ddev->agp->bridge,
					 size, page_flags, dummy_read_page);
	}
#endif

	gtt = kzalloc(sizeof(struct radeon_ttm_tt), GFP_KERNEL);
	if (gtt == NULL) {
		return NULL;
	}
	gtt->ttm.ttm.func = &radeon_backend_func;
	gtt->rdev = rdev;
	if (ttm_dma_tt_init(&gtt->ttm, bdev, size, page_flags, dummy_read_page)) {
		kfree(gtt);
		return NULL;
	}
	return &gtt->ttm.ttm;
}

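/*
 * Allocate and DMA-map the backing pages.  AGP and SWIOTLB configs
 * have their own pools; otherwise pages come from the common TTM pool
 * and are mapped through the PCI layer one by one.
 */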
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;

	if (ttm->state != tt_unpopulated)
		return 0;

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			/* unwind every mapping made so far, including page 0 */
			while (i--) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate(&gtt->ttm, rdev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (gtt->ttm.dma_address[i]) {
			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

static struct ttm_bo_driver radeon_bo_driver = {
	.ttm_tt_create = &radeon_ttm_tt_create,
	.ttm_tt_populate = &radeon_ttm_tt_populate,
	.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
	.invalidate_caches = &radeon_invalidate_caches,
	.init_mem_type = &radeon_init_mem_type,
	.evict_flags = &radeon_evict_flags,
	.move = &radeon_bo_move,
	.verify_access = &radeon_verify_access,
	.sync_obj_signaled = &radeon_sync_obj_signaled,
	.sync_obj_wait = &radeon_sync_obj_wait,
	.sync_obj_flush = &radeon_sync_obj_flush,
	.sync_obj_unref = &radeon_sync_obj_unref,
	.sync_obj_ref = &radeon_sync_obj_ref,
	.move_notify = &radeon_bo_move_notify,
	.fault_reserve_notify = &radeon_bo_fault_reserve_notify,
	.io_mem_reserve = &radeon_ttm_io_mem_reserve,
	.io_mem_free = &radeon_ttm_io_mem_free,
};

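/*
 * Bring up TTM for the device: register the BO driver, size the VRAM
 * and GTT heaps, pin the small stolen-VGA buffer in VRAM and register
 * the debugfs entries.
 */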
int radeon_ttm_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_ttm_global_init(rdev);
	if (r) {
		return r;
	}
	/* No other users of the address space, so set the offset to 0 */
	r = ttm_bo_device_init(&rdev->mman.bdev,
			       rdev->mman.bo_global_ref.ref.object,
			       &radeon_bo_driver, DRM_FILE_PAGE_OFFSET,
			       rdev->need_dma32);
	if (r) {
		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
		return r;
	}
	rdev->mman.initialized = true;
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
				rdev->mc.real_vram_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing VRAM heap.\n");
		return r;
	}
	r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
				RADEON_GEM_DOMAIN_VRAM,
				&rdev->stollen_vga_memory);
	if (r) {
		return r;
	}
	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
	if (r)
		return r;
	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
	radeon_bo_unreserve(rdev->stollen_vga_memory);
	if (r) {
		radeon_bo_unref(&rdev->stollen_vga_memory);
		return r;
	}
	DRM_INFO("radeon: %uM of VRAM memory ready\n",
		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
				rdev->mc.gtt_size >> PAGE_SHIFT);
	if (r) {
		DRM_ERROR("Failed initializing GTT heap.\n");
		return r;
	}
	DRM_INFO("radeon: %uM of GTT memory ready.\n",
		 (unsigned)(rdev->mc.gtt_size / (1024 * 1024)));
	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;
	}

	r = radeon_ttm_debugfs_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init debugfs\n");
		return r;
	}
	return 0;
}

void radeon_ttm_fini(struct radeon_device *rdev)
{
	int r;

	if (!rdev->mman.initialized)
		return;
	if (rdev->stollen_vga_memory) {
		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
		if (r == 0) {
			radeon_bo_unpin(rdev->stollen_vga_memory);
			radeon_bo_unreserve(rdev->stollen_vga_memory);
		}
		radeon_bo_unref(&rdev->stollen_vga_memory);
	}
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
	ttm_bo_device_release(&rdev->mman.bdev);
	radeon_gart_fini(rdev);
	radeon_ttm_global_fini(rdev);
	rdev->mman.initialized = false;
	DRM_INFO("radeon: ttm finalized\n");
}

/* this should only be called at bootup or when userspace
 * isn't running */
void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
{
	struct ttm_mem_type_manager *man;

	if (!rdev->mman.initialized)
		return;

	man = &rdev->mman.bdev.man[TTM_PL_VRAM];
	/* this just adjusts TTM size idea, which sets lpfn to the correct value */
	man->size = size >> PAGE_SHIFT;
}

static struct vm_operations_struct radeon_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops = NULL;

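/*
 * Page-fault wrapper: take vram_mutex around the stock TTM fault
 * handler so CPU faults cannot race with buffer moves issued by the
 * driver.
 */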
static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo;
	struct radeon_device *rdev;
	int r;

	bo = (struct ttm_buffer_object *)vma->vm_private_data;
	if (bo == NULL) {
		return VM_FAULT_NOPAGE;
	}
	rdev = radeon_get_rdev(bo->bdev);
	mutex_lock(&rdev->vram_mutex);
	r = ttm_vm_ops->fault(vma, vmf);
	mutex_unlock(&rdev->vram_mutex);
	return r;
}

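/*
 * mmap entry point: offsets below DRM_FILE_PAGE_OFFSET belong to the
 * legacy DRM maps; everything above is handed to TTM, with the fault
 * handler swapped for the locking wrapper above.
 */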
int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv;
	struct radeon_device *rdev;
	int r;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
		return drm_mmap(filp, vma);
	}

	file_priv = filp->private_data;
	rdev = file_priv->minor->dev->dev_private;
	if (rdev == NULL) {
		return -EINVAL;
	}
	r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
	if (unlikely(r != 0)) {
		return r;
	}
	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		radeon_ttm_vm_ops = *ttm_vm_ops;
		radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
	}
	vma->vm_ops = &radeon_ttm_vm_ops;
	return 0;
}


#define RADEON_DEBUGFS_MEM_TYPES 2

#if defined(CONFIG_DEBUG_FS)
static int radeon_mm_dump_table(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_mm *mm = (struct drm_mm *)node->info_ent->data;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ret;
	struct ttm_bo_global *glob = rdev->mman.bdev.glob;

	spin_lock(&glob->lru_lock);
	ret = drm_mm_dump_table(m, mm);
	spin_unlock(&glob->lru_lock);
	return ret;
}
#endif

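/*
 * Expose the VRAM and GTT managers plus the TTM page pools (and the
 * DMA pool when SWIOTLB is active) through debugfs.
 */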
static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
	static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
	unsigned i;

	for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
		if (i == 0)
			sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
		else
			sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &radeon_mm_dump_table;
		radeon_mem_types_list[i].driver_features = 0;
		if (i == 0)
			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
		else
			radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;

	}
	/* Add ttm page pool to debugfs */
	sprintf(radeon_mem_types_names[i], "ttm_page_pool");
	radeon_mem_types_list[i].name = radeon_mem_types_names[i];
	radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
	radeon_mem_types_list[i].driver_features = 0;
	radeon_mem_types_list[i++].data = NULL;
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
		radeon_mem_types_list[i].name = radeon_mem_types_names[i];
		radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
		radeon_mem_types_list[i].driver_features = 0;
		radeon_mem_types_list[i++].data = NULL;
	}
#endif
	return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);

#endif
	return 0;
}