/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);

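/*
 * Fetch one dword from the IB chunk at dword index @idx. The chunk data
 * lives in userspace pages, so a two-slot cache of kernel-mapped pages
 * (kpage[0]/kpage[1]) is consulted first and radeon_cs_update_pages() is
 * only called on a miss. As a worked example (assuming the usual 4 KiB
 * PAGE_SIZE): idx = 1030 is byte offset 4120, i.e. pg_idx = 1 and
 * pg_offset = 24, so the value is read from dword 6 of the second page.
 */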
u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}

void radeon_ring_write(struct radeon_cp *cp, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (cp->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	cp->ring[cp->wptr++] = v;
	cp->wptr &= cp->ptr_mask;
	cp->count_dw--;
	cp->ring_free_dw--;
}

void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
{
	struct radeon_ib *ib, *n;

	list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
		list_del(&ib->list);
		vfree(ib->ptr);
		kfree(ib);
	}
}

void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ib *bib;

	bib = kmalloc(sizeof(*bib), GFP_KERNEL);
	if (bib == NULL)
		return;
	bib->ptr = vmalloc(ib->length_dw * 4);
	if (bib->ptr == NULL) {
		kfree(bib);
		return;
	}
	memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
	bib->length_dw = ib->length_dw;
	mutex_lock(&rdev->ib_pool.mutex);
	list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
	mutex_unlock(&rdev->ib_pool.mutex);
}
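
/*
 * Note: radeon_ib_bogus_add() keeps a private vmalloc'ed copy of a
 * troublesome IB so it can be inspected later through the
 * "radeon_ib_bogus" debugfs file; the copy is freed either when it is
 * dumped or by radeon_ib_bogus_cleanup() at pool teardown.
 */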

/*
 * IB.
 */
int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	struct radeon_ib *nib;
	int r = 0, i, c;

	*ib = NULL;
	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
		i &= (RADEON_IB_POOL_SIZE - 1);
		if (rdev->ib_pool.ibs[i].free) {
			nib = &rdev->ib_pool.ibs[i];
			break;
		}
	}
	if (nib == NULL) {
		/* This should never happen: it means we allocated all
		 * IBs and haven't scheduled one yet. Return EBUSY to
		 * userspace, hoping for better luck when the ioctl is
		 * retried.
		 */
		dev_err(rdev->dev, "no free indirect buffer!\n");
		mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -EBUSY;
	}
	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	nib->free = false;
	if (nib->fence) {
		mutex_unlock(&rdev->ib_pool.mutex);
		r = radeon_fence_wait(nib->fence, false);
		if (r) {
			dev_err(rdev->dev, "error waiting for fence of IB(%u:0x%016lX:%u)\n",
				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
			mutex_lock(&rdev->ib_pool.mutex);
			nib->free = true;
			mutex_unlock(&rdev->ib_pool.mutex);
			radeon_fence_unref(&fence);
			return r;
		}
		mutex_lock(&rdev->ib_pool.mutex);
	}
	radeon_fence_unref(&nib->fence);
	nib->fence = fence;
	nib->length_dw = 0;
	mutex_unlock(&rdev->ib_pool.mutex);
	*ib = nib;
	return 0;
}

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	if (!tmp->fence->emitted)
		radeon_fence_unref(&tmp->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	tmp->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_cp *cp = &rdev->cp[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !cp->ready) {
		/* TODO: Nothing in the IB; we should report this. */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for the fence too */
	r = radeon_ring_lock(rdev, cp, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	/* once scheduled, the IB is considered free and is protected by the fence */
	ib->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ring_unlock_commit(rdev, cp);
	return 0;
}

int radeon_ib_pool_init(struct radeon_device *rdev)
{
	void *ptr;
	uint64_t gpu_addr;
	int i;
	int r = 0;

	if (rdev->ib_pool.robj)
		return 0;
	INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
	/* Allocate 1M object buffer */
	r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
			     PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
			     &rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->ib_pool.robj);
		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
	radeon_bo_unreserve(rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		unsigned offset;

		offset = i * 64 * 1024;
		rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
		rdev->ib_pool.ibs[i].ptr = ptr + offset;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		rdev->ib_pool.ibs[i].free = true;
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");
	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	return r;
}
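
/*
 * Pool layout sketch: a single pinned GTT buffer of
 * RADEON_IB_POOL_SIZE * 64 KiB is carved into fixed 64 KiB slots, one per
 * pool entry; with a pool size of 16 this is the 1M object mentioned
 * above, and ibs[i] simply starts at byte offset i * 64 KiB within it.
 */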

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	int r;
	struct radeon_bo *robj;

	if (!rdev->ib_pool.ready) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	radeon_ib_bogus_cleanup(rdev);
	robj = rdev->ib_pool.robj;
	rdev->ib_pool.robj = NULL;
	mutex_unlock(&rdev->ib_pool.mutex);

	if (robj) {
		r = radeon_bo_reserve(robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(robj);
			radeon_bo_unpin(robj);
			radeon_bo_unreserve(robj);
		}
		radeon_bo_unref(&robj);
	}
}


/*
 * Ring.
 */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_cp *cp)
{
	/* r1xx-r5xx only has CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (cp == &rdev->cp[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}

void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_cp *cp)
{
	if (rdev->wb.enabled)
		cp->rptr = le32_to_cpu(rdev->wb.wb[cp->rptr_offs/4]);
	else
		cp->rptr = RREG32(cp->rptr_reg);
	/* This works because ring_size is a power of 2 */
	cp->ring_free_dw = (cp->rptr + (cp->ring_size / 4));
	cp->ring_free_dw -= cp->wptr;
	cp->ring_free_dw &= cp->ptr_mask;
	if (!cp->ring_free_dw) {
		cp->ring_free_dw = cp->ring_size / 4;
	}
}
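
/*
 * Worked example of the mask arithmetic above, assuming a 64 KiB ring
 * (16384 dwords, ptr_mask = 0x3fff): with rptr = 100 and wptr = 16380,
 * (100 + 16384 - 16380) & 0x3fff = 104 free dwords, i.e. the writer may
 * advance to just behind the reader even though wptr has nearly wrapped.
 * A result of 0 is ambiguous (completely empty or completely full), so it
 * is reported as a full ring's worth of free space; rptr == wptr only
 * ever means empty here, because radeon_ring_alloc() below always leaves
 * at least one dword unused.
 */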

int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + cp->align_mask) & ~cp->align_mask;
	while (ndw > (cp->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, cp);
		if (ndw < cp->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, cp));
		if (r)
			return r;
	}
	cp->count_dw = ndw;
	cp->wptr_old = cp->wptr;
	return 0;
}

int radeon_ring_lock(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ndw)
{
	int r;

	mutex_lock(&cp->mutex);
	r = radeon_ring_alloc(rdev, cp, ndw);
	if (r) {
		mutex_unlock(&cp->mutex);
		return r;
	}
	return 0;
}

void radeon_ring_commit(struct radeon_device *rdev, struct radeon_cp *cp)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (cp->align_mask + 1) -
		       (cp->wptr & cp->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(cp, 2 << 30);
	}
	DRM_MEMORYBARRIER();
	WREG32(cp->wptr_reg, cp->wptr);
	(void)RREG32(cp->wptr_reg);
}

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_cp *cp)
{
	radeon_ring_commit(rdev, cp);
	mutex_unlock(&cp->mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_cp *cp)
{
	cp->wptr = cp->wptr_old;
	mutex_unlock(&cp->mutex);
}

int radeon_ring_init(struct radeon_device *rdev, struct radeon_cp *cp, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg)
{
	int r;

	cp->ring_size = ring_size;
	cp->rptr_offs = rptr_offs;
	cp->rptr_reg = rptr_reg;
	cp->wptr_reg = wptr_reg;
	/* Allocate ring buffer */
	if (cp->ring_obj == NULL) {
		r = radeon_bo_create(rdev, cp->ring_size, PAGE_SIZE, true,
					RADEON_GEM_DOMAIN_GTT,
					&cp->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(cp->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(cp->ring_obj, RADEON_GEM_DOMAIN_GTT,
					&cp->gpu_addr);
		if (r) {
			radeon_bo_unreserve(cp->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(cp->ring_obj,
				       (void **)&cp->ring);
		radeon_bo_unreserve(cp->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	cp->ptr_mask = (cp->ring_size / 4) - 1;
	cp->ring_free_dw = cp->ring_size / 4;
	return 0;
}

421
void radeon_ring_fini(struct radeon_device *rdev, struct radeon_cp *cp)
422
{
423
	int r;
424
	struct radeon_bo *ring_obj;
425

426 427 428 429 430
	mutex_lock(&cp->mutex);
	ring_obj = cp->ring_obj;
	cp->ring = NULL;
	cp->ring_obj = NULL;
	mutex_unlock(&cp->mutex);
431 432 433

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
434
		if (likely(r == 0)) {
435 436 437
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
438
		}
439
		radeon_bo_unref(&ring_obj);
440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
	seq_printf(m, "IB %04u\n", ib->idx);
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_device *rdev = node->info_ent->data;
	struct radeon_ib *ib;
	unsigned i;

	mutex_lock(&rdev->ib_pool.mutex);
	if (list_empty(&rdev->ib_pool.bogus_ib)) {
		mutex_unlock(&rdev->ib_pool.mutex);
		seq_printf(m, "no bogus IB recorded\n");
		return 0;
	}
	ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
	list_del_init(&ib->list);
	mutex_unlock(&rdev->ib_pool.mutex);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	vfree(ib->ptr);
	kfree(ib);
	return 0;
}
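
/*
 * Note: each read of the "radeon_ib_bogus" debugfs file pops and frees one
 * recorded IB from the list, so dumps are one-shot; once the list is empty
 * the file reports "no bogus IB recorded".
 */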

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];

static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
	{"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
};
#endif

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	int r;

	radeon_debugfs_ib_bogus_info_list[0].data = rdev;
	r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
	if (r)
		return r;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}