/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

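/* Write a fence sequence number for @ring, either to the writeback page
 * when writeback is enabled or to the ring's scratch register otherwise.
 */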
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	if (rdev->wb.enabled) {
		*rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
	}
}

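/* Read back the last fence sequence number the GPU has signaled for @ring,
 * from the writeback page if enabled, else from the scratch register.
 */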
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	u32 seq = 0;

	if (rdev->wb.enabled) {
		seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
	} else {
		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
	}
	return seq;
}

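/* Assign the next sequence number to @fence, emit it on its ring (or write
 * it directly if the ring is not ready) and move the fence onto the
 * per-ring emitted list. Takes rdev->fence_lock internally.
 */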
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (fence->emitted) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
	if (!rdev->ring[fence->ring].ready)
		/* FIXME: cp is not running, assume everything is done right
		 * away
		 */
		radeon_fence_write(rdev, fence->seq, fence->ring);
	else
		radeon_fence_ring_emit(rdev, fence->ring, fence);

	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	fence->emitted = true;
	list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

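/* Poll the fence value for @ring and move every fence emitted up to that
 * sequence number onto the signaled list. Must be called with
 * rdev->fence_lock held; returns true when waiters should be woken up.
 */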
static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;
	unsigned long cjiffies;

	seq = radeon_fence_read(rdev, ring);
	if (seq != rdev->fence_drv[ring].last_seq) {
		rdev->fence_drv[ring].last_seq = seq;
		rdev->fence_drv[ring].last_jiffies = jiffies;
		rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
	} else {
		cjiffies = jiffies;
		if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
			cjiffies -= rdev->fence_drv[ring].last_jiffies;
			if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
				/* update the timeout */
				rdev->fence_drv[ring].last_timeout -= cjiffies;
			} else {
				/* the 500ms timeout has elapsed, we should test
				 * for GPU lockup
				 */
				rdev->fence_drv[ring].last_timeout = 1;
			}
		} else {
			/* jiffies wrapped around, update last_jiffies; we will
			 * just wait a little longer
			 */
			rdev->fence_drv[ring].last_jiffies = cjiffies;
		}
		return false;
	}
	n = NULL;
	list_for_each(i, &rdev->fence_drv[ring].emitted) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_move_tail(i, &rdev->fence_drv[ring].signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv[ring].emitted);
		wake = true;
	}
	return wake;
}

static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	list_del(&fence->list);
	fence->emitted = false;
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	kfree(fence);
}

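/* Typical fence lifecycle (rough sketch; the ring index is only an example):
 *
 *	struct radeon_fence *fence;
 *
 *	r = radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	...emit commands to the ring...
 *	r = radeon_fence_emit(rdev, fence);
 *	r = radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 */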
int radeon_fence_create(struct radeon_device *rdev,
			struct radeon_fence **fence,
			int ring)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emitted = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	(*fence)->ring = ring;
	INIT_LIST_HEAD(&(*fence)->list);

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

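/* Check whether @fence has signaled, polling the ring if necessary.
 * NULL fences, GPU lockup and driver shutdown are all reported as signaled
 * so callers can make forward progress.
 */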
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	if (fence->rdev->gpu_lockup)
		return true;

	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emitted) {
		WARN(1, "Querying an unemitted fence : %p !\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev, fence->ring);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	return signaled;
}

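/* Wait for @fence to signal. If @intr is true the wait can be interrupted
 * by a signal. The wait is bounded so that a stuck fence triggers the GPU
 * lockup check and, if needed, a GPU reset before the wait is retried.
 */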
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout;
	u32 seq;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}
	timeout = rdev->fence_drv[fence->ring].last_timeout;
retry:
	/* save current sequence used to check for GPU lockup */
	seq = rdev->fence_drv[fence->ring].last_seq;
	trace_radeon_fence_wait_begin(rdev->ddev, seq);
	if (intr) {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
		if (unlikely(r < 0)) {
			return r;
		}
	} else {
		radeon_irq_kms_sw_irq_get(rdev);
		r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
			 radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev);
	}
	trace_radeon_fence_wait_end(rdev->ddev, seq);
	if (unlikely(!radeon_fence_signaled(fence))) {
		/* we were interrupted for some reason and the fence
		 * isn't signaled yet, resume waiting
		 */
		if (r) {
			timeout = r;
			goto retry;
		}
		/* don't protect read access to rdev->fence_drv[fence->ring].last_seq;
		 * if we are experiencing a lockup the value doesn't change
		 */
		if (seq == rdev->fence_drv[fence->ring].last_seq &&
		    radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
			/* good news we believe it's a lockup */
			printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
			     fence->seq, seq);
			/* FIXME: what should we do ? marking everyone
			 * as signaled for now
			 */
			rdev->gpu_lockup = true;
			r = radeon_gpu_reset(rdev);
			if (r)
				return r;
			radeon_fence_write(rdev, fence->seq, fence->ring);
			rdev->gpu_lockup = false;
		}
		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		rdev->fence_drv[fence->ring].last_jiffies = jiffies;
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		goto retry;
	}
	return 0;
}

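/* Wait for the oldest emitted fence on @ring to signal. Returns 0
 * immediately if the GPU is locked up or there is nothing to wait for.
 */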
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

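/* Wait for the most recently emitted fence on @ring to signal, i.e. drain
 * all outstanding work on that ring.
 */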
int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

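/* Poll @ring under the fence lock and wake up anyone sleeping on the ring's
 * fence queue when new fences have signaled.
 */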
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev, ring);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv[ring].queue);
	}
}

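/* Return how many emitted fences on @ring have not been processed yet,
 * capped at 3 since callers only need a rough estimate.
 */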
int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	int not_processed = 0;

	read_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (!rdev->fence_drv[ring].initialized) {
		read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}

	if (!list_empty(&rdev->fence_drv[ring].emitted)) {
		struct list_head *ptr;
		list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
			/* count up to 3, that's enough info */
			if (++not_processed >= 3)
				break;
		}
	}
	read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return not_processed;
}

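/* Set up where fence values for @ring live (writeback slot or scratch
 * register), write the current sequence number there and mark the ring's
 * fence driver as initialized.
 */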
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	uint64_t index;
	int r;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
	rdev->fence_drv[ring].initialized = true;
	DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

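/* Reset the software-side state of one ring's fence driver (lists, wait
 * queue, sequence counter); no hardware access happens here.
 */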
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	atomic_set(&rdev->fence_drv[ring].seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
	init_waitqueue_head(&rdev->fence_drv[ring].queue);
	rdev->fence_drv[ring].initialized = false;
}

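/* Initialize the per-ring fence driver state and register the fence debugfs
 * file; the hardware side is set up later by radeon_fence_driver_start_ring().
 */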
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int ring;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

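/* Tear down the fence driver: wait for outstanding fences on each
 * initialized ring, wake any waiters and release the scratch registers.
 */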
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_last(rdev, ring);
		wake_up_all(&rdev->fence_drv[ring].queue);
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		rdev->fence_drv[ring].initialized = false;
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%08X\n",
			   radeon_fence_read(rdev, i));
		if (!list_empty(&rdev->fence_drv[i].emitted)) {
			fence = list_entry(rdev->fence_drv[i].emitted.prev,
					   struct radeon_fence, list);
			seq_printf(m, "Last emitted fence %p with 0x%08X\n",
				   fence,  fence->seq);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}