/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);
int radeon_debugfs_ring_init(struct radeon_device *rdev);

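/* Return dword @idx of the IB chunk currently being parsed. The chunk is
 * accessed through two cached kernel page mappings; on a miss,
 * radeon_cs_update_pages() maps the needed page, and any mapping error is
 * stored in p->parser_error (0 is returned in that case).
 */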
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

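/* Write one dword at the ring's write pointer and advance wptr with
 * wrap-around (ptr_mask). count_dw tracks the dwords reserved by the
 * caller so overruns can be reported when DRM_DEBUG_CODE is set.
 */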
void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writting more dword to ring than expected !\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}

/*
 * IB.
 */
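/* Free an IB whose fence has been emitted and has signaled: drop the
 * fence reference and return the sub-allocation to the pool. Returns
 * true if the IB was freed.
 */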
bool radeon_ib_try_free(struct radeon_device *rdev, struct radeon_ib *ib)
{
	bool done = false;

	/* only free IBs which have been emitted */
	if (ib->fence && ib->fence->emitted) {
		if (radeon_fence_signaled(ib->fence)) {
			radeon_fence_unref(&ib->fence);
			radeon_sa_bo_free(rdev, &ib->sa_bo);
			done = true;
		}
	}
	return done;
}

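/* Allocate an IB of @size bytes (rounded up to 256) for @ring. A fence
 * is created first, then the pool is scanned from head_id for a slot
 * whose previous fence has signaled. If all slots are busy, wait on an
 * already emitted fence and retry, giving up with -ENOMEM after five
 * attempts.
 */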
int radeon_ib_get(struct radeon_device *rdev, int ring,
		  struct radeon_ib **ib, unsigned size)
{
	struct radeon_fence *fence;
	unsigned cretry = 0;
	int r = 0, i, idx;

	*ib = NULL;
	/* align size on 256 bytes */
	size = ALIGN(size, 256);

	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}

	mutex_lock(&rdev->ib_pool.mutex);
	idx = rdev->ib_pool.head_id;
retry:
	if (cretry > 5) {
		dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
		mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -ENOMEM;
	}
	cretry++;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		radeon_ib_try_free(rdev, &rdev->ib_pool.ibs[idx]);
		if (rdev->ib_pool.ibs[idx].fence == NULL) {
			r = radeon_sa_bo_new(rdev, &rdev->ib_pool.sa_manager,
					     &rdev->ib_pool.ibs[idx].sa_bo,
					     size, 256);
			if (!r) {
				*ib = &rdev->ib_pool.ibs[idx];
				(*ib)->ptr = rdev->ib_pool.sa_manager.cpu_ptr;
				(*ib)->ptr += ((*ib)->sa_bo.offset >> 2);
				(*ib)->gpu_addr = rdev->ib_pool.sa_manager.gpu_addr;
				(*ib)->gpu_addr += (*ib)->sa_bo.offset;
				(*ib)->fence = fence;
				/* IBs are most likely to be allocated in a ring fashion,
				 * thus rdev->ib_pool.head_id should be the id of the
				 * oldest IB
				 */
				rdev->ib_pool.head_id = (1 + idx);
				rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
				mutex_unlock(&rdev->ib_pool.mutex);
				return 0;
			}
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	/* this should be a rare event, i.e. all IBs are scheduled but none
	 * has signaled yet.
	 */
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		if (rdev->ib_pool.ibs[idx].fence && rdev->ib_pool.ibs[idx].fence->emitted) {
			r = radeon_fence_wait(rdev->ib_pool.ibs[idx].fence, false);
			if (!r) {
				goto retry;
			}
			/* an error happened */
			break;
		}
		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	}
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_fence_unref(&fence);
	return r;
}

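/* Hand an IB back to the pool. If its fence was never emitted, the
 * sub-allocation and fence are released right away; otherwise the slot
 * is reclaimed later by radeon_ib_try_free().
 */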
void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	if (tmp->fence && !tmp->fence->emitted) {
		radeon_sa_bo_free(rdev, &tmp->sa_bo);
		radeon_fence_unref(&tmp->fence);
	}
	mutex_unlock(&rdev->ib_pool.mutex);
}

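/* Submit an IB on the ring it was fenced against: lock the ring with
 * room for the IB packet and the fence (64 dwords), let the ASIC code
 * emit the IB, emit the fence and commit.
 */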
int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: Nothing in the IB; we should report this. */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}

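/* One-time setup of the IB pool: create the GTT sub-allocator that backs
 * all RADEON_IB_POOL_SIZE IBs, initialize every pool entry and register
 * the IB and ring debugfs files. Subsequent calls return early once the
 * pool is marked ready.
 */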
int radeon_ib_pool_init(struct radeon_device *rdev)
{
	int i, r;

	mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		mutex_unlock(&rdev->ib_pool.mutex);
		return 0;
	}

	r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
				      RADEON_IB_POOL_SIZE*64*1024,
				      RADEON_GEM_DOMAIN_GTT);
	if (r) {
		mutex_unlock(&rdev->ib_pool.mutex);
		return r;
	}

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		rdev->ib_pool.ibs[i].fence = NULL;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		INIT_LIST_HEAD(&rdev->ib_pool.ibs[i].sa_bo.list);
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");

	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB !\n");
	}
	if (radeon_debugfs_ring_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for rings !\n");
	}
	mutex_unlock(&rdev->ib_pool.mutex);
	return 0;
}

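/* Tear down the IB pool: release every sub-allocation and fence still
 * held by a pool entry and destroy the backing sub-allocator.
 */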
void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	unsigned i;

	mutex_lock(&rdev->ib_pool.mutex);
	if (rdev->ib_pool.ready) {
		for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
			radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
			radeon_fence_unref(&rdev->ib_pool.ibs[i].fence);
		}
		radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
		rdev->ib_pool.ready = false;
	}
	mutex_unlock(&rdev->ib_pool.mutex);
}

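/* Thin wrappers starting respectively suspending the sub-allocator that
 * backs the IB pool.
 */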
int radeon_ib_pool_start(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_start(rdev, &rdev->ib_pool.sa_manager);
}

int radeon_ib_pool_suspend(struct radeon_device *rdev)
{
	return radeon_sa_bo_manager_suspend(rdev, &rdev->ib_pool.sa_manager);
}

/*
 * Ring.
 */
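/* Translate a ring pointer back to its index in rdev->ring[]. Pre-R600
 * chips only have the GFX CP ring; Cayman and newer also have the CP1
 * and CP2 rings.
 */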
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* r1xx-r5xx only has CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}

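/* Re-read the ring's read pointer (from the write-back page when enabled,
 * otherwise from the rptr register) and recompute ring_free_dw, the
 * number of free dwords between wptr and rptr. Relies on ring_size being
 * a power of two.
 */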
void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}

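/* Reserve @ndw dwords of ring space, rounded up to the ring's alignment.
 * If there is not enough room, wait for the next fence on this ring to
 * signal and re-check until the request fits.
 */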
int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

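/* Take the ring mutex and reserve @ndw dwords. The mutex is held until
 * radeon_ring_unlock_commit() or radeon_ring_unlock_undo() releases it.
 */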
int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&ring->mutex);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&ring->mutex);
		return r;
	}
	return 0;
}

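/* Pad the ring with NOPs up to the fetch alignment and publish the new
 * write pointer to the hardware; the RREG32 afterwards acts as a posting
 * read for the wptr write.
 */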
void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}

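/* Commit the queued dwords and release the mutex taken by
 * radeon_ring_lock().
 */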
void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&ring->mutex);
}

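/* Drop everything written since radeon_ring_lock() by restoring the
 * saved write pointer, then release the ring mutex.
 */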
void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
	mutex_unlock(&ring->mutex);
}

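/* One-time ring setup: record the register offsets and masks describing
 * this ring, then allocate, pin and kmap a GTT buffer object of
 * @ring_size bytes that holds the ring contents.
 */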
int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
					RADEON_GEM_DOMAIN_GTT,
					&ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
					&ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				       (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	return 0;
}

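/* Tear down a ring: detach the buffer object from the ring structure
 * under the mutex, then unmap, unpin and drop the reference to it.
 */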
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&ring->mutex);
	ring_obj = ring->ring_obj;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&ring->mutex);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];
#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
					ARRAY_SIZE(radeon_debugfs_ring_info_list));
#else
	return 0;
#endif
}

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;

	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}