/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "atom.h"

int radeon_debugfs_ib_init(struct radeon_device *rdev);
int radeon_debugfs_ring_init(struct radeon_device *rdev);

u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
{
	struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
	u32 pg_idx, pg_offset;
	u32 idx_value = 0;
	int new_page;

	pg_idx = (idx * 4) / PAGE_SIZE;
	pg_offset = (idx * 4) % PAGE_SIZE;

	if (ibc->kpage_idx[0] == pg_idx)
		return ibc->kpage[0][pg_offset/4];
	if (ibc->kpage_idx[1] == pg_idx)
		return ibc->kpage[1][pg_offset/4];

	new_page = radeon_cs_update_pages(p, pg_idx);
	if (new_page < 0) {
		p->parser_error = new_page;
		return 0;
	}

	idx_value = ibc->kpage[new_page][pg_offset/4];
	return idx_value;
}
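
/*
 * Worked example: with PAGE_SIZE 4096, idx = 1030 addresses byte
 * 1030 * 4 = 4120, so pg_idx = 1 and pg_offset = 24, i.e. dword 6 of the
 * second page.  The two-entry kpage cache above lets a parser that walks
 * the IB linearly hit without remapping pages.
 */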

void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
{
#if DRM_DEBUG_CODE
	if (ring->count_dw <= 0) {
		DRM_ERROR("radeon: writing more dwords to the ring than expected!\n");
	}
#endif
	ring->ring[ring->wptr++] = v;
	ring->wptr &= ring->ptr_mask;
	ring->count_dw--;
	ring->ring_free_dw--;
}
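
/*
 * Typical usage (a sketch; "header" and "value" stand in for whatever
 * packet the caller emits):
 *
 *	r = radeon_ring_lock(rdev, ring, 2);
 *	if (r)
 *		return r;
 *	radeon_ring_write(ring, header);
 *	radeon_ring_write(ring, value);
 *	radeon_ring_unlock_commit(rdev, ring);
 *
 * radeon_ring_lock() guarantees the requested number of dwords is free,
 * so the DRM_DEBUG_CODE check above should never fire for a well-behaved
 * caller.
 */
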
void radeon_ib_bogus_cleanup(struct radeon_device *rdev)
{
	struct radeon_ib *ib, *n;

	list_for_each_entry_safe(ib, n, &rdev->ib_pool.bogus_ib, list) {
		list_del(&ib->list);
		vfree(ib->ptr);
		kfree(ib);
	}
}

void radeon_ib_bogus_add(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ib *bib;

	bib = kmalloc(sizeof(*bib), GFP_KERNEL);
	if (bib == NULL)
		return;
	bib->ptr = vmalloc(ib->length_dw * 4);
	if (bib->ptr == NULL) {
		kfree(bib);
		return;
	}
	memcpy(bib->ptr, ib->ptr, ib->length_dw * 4);
	bib->length_dw = ib->length_dw;
	mutex_lock(&rdev->ib_pool.mutex);
	list_add_tail(&bib->list, &rdev->ib_pool.bogus_ib);
	mutex_unlock(&rdev->ib_pool.mutex);
}
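
/*
 * radeon_ib_bogus_add() snapshots an IB so it can be inspected later
 * through the "radeon_ib_bogus" debugfs file implemented at the bottom of
 * this file; deciding which IBs are worth recording is the caller's
 * policy and is not visible here.
 */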

/*
 * IB.
 */
int radeon_ib_get(struct radeon_device *rdev, int ring, struct radeon_ib **ib)
{
	struct radeon_fence *fence;
	struct radeon_ib *nib;
	int r = 0, i, c;

	*ib = NULL;
	r = radeon_fence_create(rdev, &fence, ring);
	if (r) {
		dev_err(rdev->dev, "failed to create fence for new IB\n");
		return r;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	for (i = rdev->ib_pool.head_id, c = 0, nib = NULL; c < RADEON_IB_POOL_SIZE; c++, i++) {
		i &= (RADEON_IB_POOL_SIZE - 1);
		if (rdev->ib_pool.ibs[i].free) {
			nib = &rdev->ib_pool.ibs[i];
			break;
		}
	}
	if (nib == NULL) {
		/* This should never happen: it means every IB in the pool
		 * is allocated and none has been scheduled yet.  Return
		 * -EBUSY so userspace can retry the ioctl and hopefully
		 * have better luck.
		 */
		dev_err(rdev->dev, "no free indirect buffer!\n");
		mutex_unlock(&rdev->ib_pool.mutex);
		radeon_fence_unref(&fence);
		return -EBUSY;
	}
	rdev->ib_pool.head_id = (nib->idx + 1) & (RADEON_IB_POOL_SIZE - 1);
	nib->free = false;
	if (nib->fence) {
		mutex_unlock(&rdev->ib_pool.mutex);
		r = radeon_fence_wait(nib->fence, false);
		if (r) {
			dev_err(rdev->dev, "error waiting for fence of IB(%u:0x%016lX:%u)\n",
				nib->idx, (unsigned long)nib->gpu_addr, nib->length_dw);
			mutex_lock(&rdev->ib_pool.mutex);
			nib->free = true;
			mutex_unlock(&rdev->ib_pool.mutex);
			radeon_fence_unref(&fence);
			return r;
		}
		mutex_lock(&rdev->ib_pool.mutex);
	}
	radeon_fence_unref(&nib->fence);
	nib->fence = fence;
	nib->length_dw = 0;
	mutex_unlock(&rdev->ib_pool.mutex);
	*ib = nib;
	return 0;
}

void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
{
	struct radeon_ib *tmp = *ib;

	*ib = NULL;
	if (tmp == NULL) {
		return;
	}
	if (!tmp->fence->emitted)
		radeon_fence_unref(&tmp->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	tmp->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
}

int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
{
	struct radeon_ring *ring = &rdev->ring[ib->fence->ring];
	int r = 0;

	if (!ib->length_dw || !ring->ready) {
		/* TODO: nothing in the IB to report */
		DRM_ERROR("radeon: couldn't schedule IB(%u).\n", ib->idx);
		return -EINVAL;
	}

	/* 64 dwords should be enough for the fence too */
	r = radeon_ring_lock(rdev, ring, 64);
	if (r) {
		DRM_ERROR("radeon: scheduling IB failed (%d).\n", r);
		return r;
	}
	radeon_ring_ib_execute(rdev, ib->fence->ring, ib);
	radeon_fence_emit(rdev, ib->fence);
	mutex_lock(&rdev->ib_pool.mutex);
	/* once scheduled, the IB is considered free and is protected by its fence */
	ib->free = true;
	mutex_unlock(&rdev->ib_pool.mutex);
	radeon_ring_unlock_commit(rdev, ring);
	return 0;
}
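
/*
 * Typical IB lifecycle (a sketch; the real caller is the CS ioctl path
 * outside this file, and "cmds"/"ndw" stand in for the caller's command
 * stream):
 *
 *	struct radeon_ib *ib;
 *
 *	r = radeon_ib_get(rdev, RADEON_RING_TYPE_GFX_INDEX, &ib);
 *	if (r)
 *		return r;
 *	memcpy(ib->ptr, cmds, ndw * 4);		// fill command dwords
 *	ib->length_dw = ndw;
 *	r = radeon_ib_schedule(rdev, ib);	// emits the IB plus its fence
 *	radeon_ib_free(rdev, &ib);		// slot recycled once the fence signals
 */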

int radeon_ib_pool_init(struct radeon_device *rdev)
{
	void *ptr;
	uint64_t gpu_addr;
	int i;
	int r = 0;

	if (rdev->ib_pool.robj)
		return 0;
	INIT_LIST_HEAD(&rdev->ib_pool.bogus_ib);
	/* Allocate the pool: one 64KB slot per IB (1M total) */
	r = radeon_bo_create(rdev, RADEON_IB_POOL_SIZE*64*1024,
			     PAGE_SIZE, true, RADEON_GEM_DOMAIN_GTT,
			     &rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to create ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
	if (r) {
		radeon_bo_unreserve(rdev->ib_pool.robj);
		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
		return r;
	}
	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
	radeon_bo_unreserve(rdev->ib_pool.robj);
	if (r) {
		DRM_ERROR("radeon: failed to map ib pool (%d).\n", r);
		return r;
	}
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		unsigned offset;

		offset = i * 64 * 1024;
		rdev->ib_pool.ibs[i].gpu_addr = gpu_addr + offset;
		rdev->ib_pool.ibs[i].ptr = ptr + offset;
		rdev->ib_pool.ibs[i].idx = i;
		rdev->ib_pool.ibs[i].length_dw = 0;
		rdev->ib_pool.ibs[i].free = true;
	}
	rdev->ib_pool.head_id = 0;
	rdev->ib_pool.ready = true;
	DRM_INFO("radeon: ib pool ready.\n");
	if (radeon_debugfs_ib_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for IB!\n");
	}
	if (radeon_debugfs_ring_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for rings!\n");
	}
	return r;
}

void radeon_ib_pool_fini(struct radeon_device *rdev)
{
	int r;
	struct radeon_bo *robj;

	if (!rdev->ib_pool.ready) {
		return;
	}
	mutex_lock(&rdev->ib_pool.mutex);
	radeon_ib_bogus_cleanup(rdev);
	robj = rdev->ib_pool.robj;
	rdev->ib_pool.robj = NULL;
	mutex_unlock(&rdev->ib_pool.mutex);

	if (robj) {
		r = radeon_bo_reserve(robj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(robj);
			radeon_bo_unpin(robj);
			radeon_bo_unreserve(robj);
		}
		radeon_bo_unref(&robj);
	}
}


/*
 * Ring.
 */
int radeon_ring_index(struct radeon_device *rdev, struct radeon_ring *ring)
{
	/* r1xx-r5xx only has CP ring */
	if (rdev->family < CHIP_R600)
		return RADEON_RING_TYPE_GFX_INDEX;

	if (rdev->family >= CHIP_CAYMAN) {
		if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX])
			return CAYMAN_RING_TYPE_CP1_INDEX;
		else if (ring == &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX])
			return CAYMAN_RING_TYPE_CP2_INDEX;
	}
	return RADEON_RING_TYPE_GFX_INDEX;
}

void radeon_ring_free_size(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 rptr;

	if (rdev->wb.enabled)
		rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]);
	else
		rptr = RREG32(ring->rptr_reg);
	ring->rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift;
	/* This works because ring_size is a power of 2 */
	ring->ring_free_dw = (ring->rptr + (ring->ring_size / 4));
	ring->ring_free_dw -= ring->wptr;
	ring->ring_free_dw &= ring->ptr_mask;
	if (!ring->ring_free_dw) {
		ring->ring_free_dw = ring->ring_size / 4;
	}
}
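
/*
 * Worked example: with a 16KB ring (4096 dwords, ptr_mask 4095),
 * rptr = 100 and wptr = 200 give (100 + 4096 - 200) & 4095 = 3996 free
 * dwords.  rptr == wptr masks to 0, which is treated above as a
 * completely empty ring; radeon_ring_alloc() below keeps one dword of
 * slack so the ring can never actually become completely full.
 */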

int radeon_ring_alloc(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	/* Align requested size with padding so unlock_commit can
	 * pad safely */
	ndw = (ndw + ring->align_mask) & ~ring->align_mask;
	while (ndw > (ring->ring_free_dw - 1)) {
		radeon_ring_free_size(rdev, ring);
		if (ndw < ring->ring_free_dw) {
			break;
		}
		r = radeon_fence_wait_next(rdev, radeon_ring_index(rdev, ring));
		if (r)
			return r;
	}
	ring->count_dw = ndw;
	ring->wptr_old = ring->wptr;
	return 0;
}

int radeon_ring_lock(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ndw)
{
	int r;

	mutex_lock(&ring->mutex);
	r = radeon_ring_alloc(rdev, ring, ndw);
	if (r) {
		mutex_unlock(&ring->mutex);
		return r;
	}
	return 0;
}

void radeon_ring_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	unsigned count_dw_pad;
	unsigned i;

	/* We pad to match fetch size */
	count_dw_pad = (ring->align_mask + 1) -
		       (ring->wptr & ring->align_mask);
	for (i = 0; i < count_dw_pad; i++) {
		radeon_ring_write(ring, ring->nop);
	}
	DRM_MEMORYBARRIER();
	WREG32(ring->wptr_reg, (ring->wptr << ring->ptr_reg_shift) & ring->ptr_reg_mask);
	(void)RREG32(ring->wptr_reg);
}
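
/*
 * Worked example (align_mask = 15, i.e. a 16-dword fetch granularity,
 * assumed here purely for illustration): if wptr & 15 == 5, the loop
 * above emits 16 - 5 = 11 nop dwords so the write pointer lands on a
 * fetch boundary before it is handed to the hardware.
 */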

void radeon_ring_unlock_commit(struct radeon_device *rdev, struct radeon_ring *ring)
{
	radeon_ring_commit(rdev, ring);
	mutex_unlock(&ring->mutex);
}

void radeon_ring_unlock_undo(struct radeon_device *rdev, struct radeon_ring *ring)
{
	ring->wptr = ring->wptr_old;
	mutex_unlock(&ring->mutex);
}

int radeon_ring_init(struct radeon_device *rdev, struct radeon_ring *ring, unsigned ring_size,
		     unsigned rptr_offs, unsigned rptr_reg, unsigned wptr_reg,
		     u32 ptr_reg_shift, u32 ptr_reg_mask, u32 nop)
{
	int r;

	ring->ring_size = ring_size;
	ring->rptr_offs = rptr_offs;
	ring->rptr_reg = rptr_reg;
	ring->wptr_reg = wptr_reg;
	ring->ptr_reg_shift = ptr_reg_shift;
	ring->ptr_reg_mask = ptr_reg_mask;
	ring->nop = nop;
	/* Allocate ring buffer */
	if (ring->ring_obj == NULL) {
		r = radeon_bo_create(rdev, ring->ring_size, PAGE_SIZE, true,
					RADEON_GEM_DOMAIN_GTT,
					&ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring create failed\n", r);
			return r;
		}
		r = radeon_bo_reserve(ring->ring_obj, false);
		if (unlikely(r != 0))
			return r;
		r = radeon_bo_pin(ring->ring_obj, RADEON_GEM_DOMAIN_GTT,
					&ring->gpu_addr);
		if (r) {
			radeon_bo_unreserve(ring->ring_obj);
			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
			return r;
		}
		r = radeon_bo_kmap(ring->ring_obj,
				       (void **)&ring->ring);
		radeon_bo_unreserve(ring->ring_obj);
		if (r) {
			dev_err(rdev->dev, "(%d) ring map failed\n", r);
			return r;
		}
	}
	ring->ptr_mask = (ring->ring_size / 4) - 1;
	ring->ring_free_dw = ring->ring_size / 4;
	return 0;
}
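
/*
 * Illustrative call (a sketch: RADEON_WB_CP_RPTR_OFFSET, CP_RB_RPTR,
 * CP_RB_WPTR and RADEON_CP_PACKET2 are stand-ins here; the real register
 * offsets, write-back slot and nop encoding come from the per-ASIC init
 * code, not this file):
 *
 *	r = radeon_ring_init(rdev, ring, 1024 * 1024,
 *			     RADEON_WB_CP_RPTR_OFFSET,
 *			     CP_RB_RPTR, CP_RB_WPTR,
 *			     0, 0xffffffff, RADEON_CP_PACKET2);
 */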

void radeon_ring_fini(struct radeon_device *rdev, struct radeon_ring *ring)
{
	int r;
	struct radeon_bo *ring_obj;

	mutex_lock(&ring->mutex);
	ring_obj = ring->ring_obj;
	ring->ring = NULL;
	ring->ring_obj = NULL;
	mutex_unlock(&ring->mutex);

	if (ring_obj) {
		r = radeon_bo_reserve(ring_obj, false);
		if (likely(r == 0)) {
			radeon_bo_kunmap(ring_obj);
			radeon_bo_unpin(ring_obj);
			radeon_bo_unreserve(ring_obj);
		}
		radeon_bo_unref(&ring_obj);
	}
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)

static int radeon_debugfs_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int ridx = *(int*)node->info_ent->data;
	struct radeon_ring *ring = &rdev->ring[ridx];
	unsigned count, i, j;

	radeon_ring_free_size(rdev, ring);
	count = (ring->ring_size / 4) - ring->ring_free_dw;
	seq_printf(m, "wptr(0x%04x): 0x%08x\n", ring->wptr_reg, RREG32(ring->wptr_reg));
	seq_printf(m, "rptr(0x%04x): 0x%08x\n", ring->rptr_reg, RREG32(ring->rptr_reg));
	seq_printf(m, "driver's copy of the wptr: 0x%08x\n", ring->wptr);
	seq_printf(m, "driver's copy of the rptr: 0x%08x\n", ring->rptr);
	seq_printf(m, "%u free dwords in ring\n", ring->ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	i = ring->rptr;
	for (j = 0; j <= count; j++) {
		seq_printf(m, "r[%04d]=0x%08x\n", i, ring->ring[i]);
		i = (i + 1) & ring->ptr_mask;
	}
	return 0;
}

static int radeon_ring_type_gfx_index = RADEON_RING_TYPE_GFX_INDEX;
static int cayman_ring_type_cp1_index = CAYMAN_RING_TYPE_CP1_INDEX;
static int cayman_ring_type_cp2_index = CAYMAN_RING_TYPE_CP2_INDEX;

static struct drm_info_list radeon_debugfs_ring_info_list[] = {
	{"radeon_ring_gfx", radeon_debugfs_ring_info, 0, &radeon_ring_type_gfx_index},
	{"radeon_ring_cp1", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp1_index},
	{"radeon_ring_cp2", radeon_debugfs_ring_info, 0, &cayman_ring_type_cp2_index},
};

static int radeon_debugfs_ib_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_ib *ib = node->info_ent->data;
	unsigned i;

	if (ib == NULL) {
		return 0;
	}
503
	seq_printf(m, "IB %04u\n", ib->idx);
504 505 506 507 508 509 510 511
	seq_printf(m, "IB fence %p\n", ib->fence);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	return 0;
}

static int radeon_debugfs_ib_bogus_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct radeon_device *rdev = node->info_ent->data;
	struct radeon_ib *ib;
	unsigned i;

	mutex_lock(&rdev->ib_pool.mutex);
	if (list_empty(&rdev->ib_pool.bogus_ib)) {
		mutex_unlock(&rdev->ib_pool.mutex);
		seq_printf(m, "no bogus IB recorded\n");
		return 0;
	}
	ib = list_first_entry(&rdev->ib_pool.bogus_ib, struct radeon_ib, list);
	list_del_init(&ib->list);
	mutex_unlock(&rdev->ib_pool.mutex);
	seq_printf(m, "IB size %05u dwords\n", ib->length_dw);
	for (i = 0; i < ib->length_dw; i++) {
		seq_printf(m, "[%05u]=0x%08X\n", i, ib->ptr[i]);
	}
	vfree(ib->ptr);
	kfree(ib);
	return 0;
}

static struct drm_info_list radeon_debugfs_ib_list[RADEON_IB_POOL_SIZE];
static char radeon_debugfs_ib_names[RADEON_IB_POOL_SIZE][32];

static struct drm_info_list radeon_debugfs_ib_bogus_info_list[] = {
	{"radeon_ib_bogus", radeon_debugfs_ib_bogus_info, 0, NULL},
};
#endif

int radeon_debugfs_ring_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ring_info_list,
					ARRAY_SIZE(radeon_debugfs_ring_info_list));
#else
	return 0;
#endif
}

int radeon_debugfs_ib_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	unsigned i;
	int r;

	radeon_debugfs_ib_bogus_info_list[0].data = rdev;
	r = radeon_debugfs_add_files(rdev, radeon_debugfs_ib_bogus_info_list, 1);
	if (r)
		return r;
	for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
		sprintf(radeon_debugfs_ib_names[i], "radeon_ib_%04u", i);
		radeon_debugfs_ib_list[i].name = radeon_debugfs_ib_names[i];
		radeon_debugfs_ib_list[i].show = &radeon_debugfs_ib_info;
		radeon_debugfs_ib_list[i].driver_features = 0;
		radeon_debugfs_ib_list[i].data = &rdev->ib_pool.ibs[i];
	}
	return radeon_debugfs_add_files(rdev, radeon_debugfs_ib_list,
					RADEON_IB_POOL_SIZE);
#else
	return 0;
#endif
}