/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

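/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to writeback memory or a scratch register,
 * depending on whether writeback is enabled.
 */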
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	if (rdev->wb.enabled) {
		*rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
	}
}

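/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from writeback memory or a scratch register,
 * depending on whether writeback is enabled.
 * Returns the value of the fence read from memory or register.
 */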
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	u32 seq = 0;

	if (rdev->wb.enabled) {
		seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
	} else {
		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
	}
	return seq;
}

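/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 *
 * Emits a fence command on the requested ring, unless the fence
 * has already been emitted.
 * Returns 0 on success.
 */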
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	/* we are protected by the ring emission mutex */
	if (fence->seq && fence->seq < RADEON_FENCE_NOTEMITED_SEQ) {
		return 0;
	}
	fence->seq = ++rdev->fence_drv[fence->ring].seq;
	radeon_fence_ring_emit(rdev, fence->ring, fence);
	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	return 0;
}

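/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased.
 */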
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process, and that
	 * other process needs to update the last_seq between the atomic
	 * read and the xchg of the current process.
	 *
	 * Moreover, for this to go into an infinite loop there needs to
	 * be continuously new fences signaled, i.e. radeon_fence_read
	 * needs to return a different value each time for both the
	 * currently polling process and the other process that updates
	 * last_seq between the atomic read and xchg of the current
	 * process. And the value the other process sets as last seq must
	 * be higher than the seq value we just read, which means that
	 * the current process needs to be interrupted after
	 * radeon_fence_read and before the atomic xchg.
	 *
	 * To be even more safe we count the number of times we loop and
	 * we bail after 10 loops, just accepting the fact that we might
	 * have temporarily set last_seq not to the true real last
	 * seq but to an older one.
	 */
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq += 0x100000000LL;
		}

		if (seq == last_seq) {
			break;
		}
		/* If we loop over we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped over too many times, leave with the
			 * fact that we might have set an older fence
			 * seq than the current real last seq as signaled
			 * by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake) {
		rdev->fence_drv[ring].last_activity = jiffies;
		wake_up_all(&rdev->fence_queue);
	}
}

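/**
 * radeon_fence_destroy - destroy a fence
 *
 * @kref: fence kref
 *
 * Frees the fence object once the last reference is dropped.
 */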
static void radeon_fence_destroy(struct kref *kref)
{
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	fence->seq = RADEON_FENCE_NOTEMITED_SEQ;
	kfree(fence);
}

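/**
 * radeon_fence_create - create a fence
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Creates a fence for the requested ring; the fence is not
 * emitted until radeon_fence_emit() is called on it.
 * Returns 0 on success, -ENOMEM on failure.
 */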
int radeon_fence_create(struct radeon_device *rdev,
			struct radeon_fence **fence,
			int ring)
{
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->seq = RADEON_FENCE_NOTEMITED_SEQ;
	(*fence)->ring = ring;
	return 0;
}

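/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Returns true if the last signaled fence value is >= the requested
 * sequence number, polling the current value once if necessary.
 * Helper function for radeon_fence_signaled().
 */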
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

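/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Checks if the requested fence has signaled.
 * Returns true if the fence has signaled, false otherwise.
 */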
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_NOTEMITED_SEQ) {
		WARN(1, "Querying an unemitted fence: %p!\n", fence);
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}

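/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon_device pointer
 * @target_seq: sequence number we want to wait for
 * @ring: ring index the fence is associated with
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring lock should be taken for lockup handling
 *
 * Waits for the requested sequence number to be written.  Helper
 * function for radeon_fence_wait(), et al.
 * Returns 0 if the sequence number has passed, an error for all other
 * cases.  -EDEADLK is returned when a GPU lockup has been detected;
 * the ring is then marked as not ready so no further jobs get
 * scheduled until a successful reset.
 */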
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 target_seq,
				 unsigned ring, bool intr, bool lock_ring)
{
	unsigned long timeout, last_activity;
	uint64_t seq;
	unsigned i;
	bool signaled;
	int r;

	while (target_seq > atomic64_read(&rdev->fence_drv[ring].last_seq)) {
		if (!rdev->ring[ring].ready) {
			return -EBUSY;
		}

		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms,
			 * so we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}
		seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
		/* Save current last activity value, used to check for GPU lockups */
		last_activity = rdev->fence_drv[ring].last_activity;

		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, ring);
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_seq_signaled(rdev, target_seq, ring)),
				timeout);
		}
		radeon_irq_kms_sw_irq_put(rdev, ring);
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, so resume waiting */
			if (r) {
				continue;
			}

			/* check if sequence value has changed since last_activity */
			if (seq != atomic64_read(&rdev->fence_drv[ring].last_seq)) {
				continue;
			}

			if (lock_ring) {
				mutex_lock(&rdev->ring_lock);
			}

			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != rdev->fence_drv[ring].last_activity) {
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx last fence id 0x%016llx)\n",
					 target_seq, seq);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				if (lock_ring) {
					mutex_unlock(&rdev->ring_lock);
				}
				return -EDEADLK;
			}

			if (lock_ring) {
				mutex_unlock(&rdev->ring_lock);
			}
		}
	}
	return 0;
}

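/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Waits for the requested fence to signal. @intr selects whether to
 * use interruptible (true) or non-interruptible (false) sleep.
 * Returns 0 if the fence has passed, an error for all other cases.
 */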
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence: %p!\n", fence);
		return -EINVAL;
	}

	r = radeon_fence_wait_seq(fence->rdev, fence->seq,
				  fence->ring, intr, true);
	if (r) {
		return r;
	}
	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}

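/**
 * radeon_fence_any_seq_signaled - check if any sequence number has signaled
 *
 * @rdev: radeon_device pointer
 * @seq: sequence numbers, indexed by ring (0 means skip that ring)
 *
 * Returns true if any of the requested sequence numbers has signaled.
 */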
bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i)) {
			return true;
		}
	}
	return false;
}

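/**
 * radeon_fence_wait_any_seq - wait for a sequence number on any ring
 *
 * @rdev: radeon_device pointer
 * @target_seq: sequence numbers we want to wait for, indexed by ring
 * @intr: use interruptible sleep
 *
 * Waits for at least one of the requested sequence numbers to be
 * written.  Helper function for radeon_fence_wait_any().
 * Returns 0 if any sequence number has passed, an error for all other
 * cases.
 */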
static int radeon_fence_wait_any_seq(struct radeon_device *rdev,
				     u64 *target_seq, bool intr)
{
	unsigned long timeout, last_activity, tmp;
	unsigned i, ring = RADEON_NUM_RINGS;
	bool signaled;
	int r;

	for (i = 0, last_activity = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!target_seq[i]) {
			continue;
		}

		/* use the most recent one as indicator */
		if (time_after(rdev->fence_drv[i].last_activity, last_activity)) {
			last_activity = rdev->fence_drv[i].last_activity;
		}

		/* For lockup detection just pick the lowest ring we are
		 * actively waiting for
		 */
		if (i < ring) {
			ring = i;
		}
	}

	/* nothing to wait for? */
	if (ring == RADEON_NUM_RINGS) {
		return 0;
	}

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms,
			 * so we will just wait for the minimum amount and then check for a lockup
			 */
			timeout = 1;
		}

		trace_radeon_fence_wait_begin(rdev->ddev, target_seq[ring]);
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_get(rdev, i);
			}
		}
		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		} else {
			r = wait_event_timeout(rdev->fence_queue,
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq)),
				timeout);
		}
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (target_seq[i]) {
				radeon_irq_kms_sw_irq_put(rdev, i);
			}
		}
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, target_seq[ring]);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and the fence
			 * isn't signaled yet, so resume waiting */
			if (r) {
				continue;
			}

			mutex_lock(&rdev->ring_lock);
			for (i = 0, tmp = 0; i < RADEON_NUM_RINGS; ++i) {
				if (time_after(rdev->fence_drv[i].last_activity, tmp)) {
					tmp = rdev->fence_drv[i].last_activity;
				}
			}
			/* test if somebody else has already decided that this is a lockup */
			if (last_activity != tmp) {
				last_activity = tmp;
				mutex_unlock(&rdev->ring_lock);
				continue;
			}

			if (radeon_ring_is_lockup(rdev, ring, &rdev->ring[ring])) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for 0x%016llx)\n",
					 target_seq[ring]);

				/* change last activity so nobody else thinks there is a lockup */
				for (i = 0; i < RADEON_NUM_RINGS; ++i) {
					rdev->fence_drv[i].last_activity = jiffies;
				}

				/* mark the ring as not ready any more */
				rdev->ring[ring].ready = false;
				mutex_unlock(&rdev->ring_lock);
				return -EDEADLK;
			}
			mutex_unlock(&rdev->ring_lock);
		}
	}
	return 0;
}

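/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon_device pointer
 * @fences: radeon fence objects, indexed by ring (NULL entries are skipped)
 * @intr: use interruptible sleep
 *
 * Waits for any of the requested fences to signal.
 * Returns 0 if any fence has passed, an error for all other cases.
 */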
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		if (fences[i]->seq == RADEON_FENCE_SIGNALED_SEQ) {
			/* something was already signaled */
			return 0;
		}

		if (fences[i]->seq < RADEON_FENCE_NOTEMITED_SEQ) {
			seq[i] = fences[i]->seq;
		}
	}

	r = radeon_fence_wait_any_seq(rdev, seq, intr);
	if (r) {
		return r;
	}
	return 0;
}

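/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Waits for the next fence on the requested ring to signal.
 * Returns 0 on success, -ENOENT if no fence is outstanding.
 * Caller must hold the ring lock.
 */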
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq;

	/* We are not protected by the ring lock when reading the current seq,
	 * but it's ok as the worst case is that we return too early while
	 * we could have waited.
	 */
	seq = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq >= rdev->fence_drv[ring].seq) {
		/* nothing to wait for, last_seq is
		   already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, ring, false, false);
}

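/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Waits for all emitted fences on the requested ring to signal.
 * Caller must hold the ring lock.
 */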
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	/* We are not protected by the ring lock when reading the current seq,
	 * but it's ok as wait empty is called from places where no more
	 * activity can be scheduled, so there won't be concurrent access
	 * to the seq value.
	 */
	return radeon_fence_wait_seq(rdev, rdev->fence_drv[ring].seq,
				     ring, false, false);
}

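/**
 * radeon_fence_ref - take a reference on a fence
 *
 * @fence: radeon fence object
 *
 * Takes a reference on the fence and returns it.
 */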
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

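/**
 * radeon_fence_unref - drop a reference on a fence
 *
 * @fence: radeon fence object
 *
 * Drops a reference on the fence and clears the caller's pointer;
 * the fence is freed when the last reference is dropped.
 */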
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

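/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Returns the number of fences emitted but not yet signaled on the
 * requested ring, clamped to avoid a 32-bit wrap around.
 */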
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by the ring lock when reading the last sequence,
	 * but it's ok to report a slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].seq - atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid a 32-bit wrap around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}

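/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring
 *
 * @rdev: radeon_device pointer
 * @ring: ring index to start the fence driver on
 *
 * Sets up the fence location (scratch register or writeback memory)
 * and makes the fence driver ready for processing on the ring.
 * Returns 0 on success, an error on failure.
 */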
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, rdev->fence_drv[ring].seq, ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

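/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring
 *
 * @rdev: radeon_device pointer
 * @ring: ring index to init the fence driver on
 *
 * Initializes the fence driver state for the requested ring.
 * Helper function for radeon_fence_driver_init().
 */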
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	rdev->fence_drv[ring].seq = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].last_activity = jiffies;
	rdev->fence_drv[ring].initialized = false;
}

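/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings
 *
 * @rdev: radeon_device pointer
 *
 * Initializes the fence driver for all possible rings.
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 on success.
 */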
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

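/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings
 *
 * @rdev: radeon_device pointer
 *
 * Waits for all outstanding fences and tears down the fence
 * driver for all possible rings.
 */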
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_empty_locked(rdev, ring);
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted  0x%016llx\n",
			   rdev->fence_drv[i].seq);
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}