/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

/*
 * Fences
 * Fences mark an event in the GPU's pipeline and are used
 * for GPU/CPU synchronization.  When the fence is written,
 * it is expected that all buffers associated with that fence
 * are no longer in use by the associated ring on the GPU and
 * that the relevant GPU caches have been flushed.  Whether
 * we use a scratch register or memory location depends on the asic
 * and whether writeback is enabled.
 */
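
/*
 * Typical fence lifecycle, as a minimal sketch (illustrative only:
 * error handling and ring locking are omitted, and the GFX ring index
 * is just an example, any ring works the same way):
 *
 *	struct radeon_fence *fence;
 *
 *	... write commands to the ring ...
 *	radeon_fence_emit(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 */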

/**
 * radeon_fence_write - write a fence value
 *
 * @rdev: radeon_device pointer
 * @seq: sequence number to write
 * @ring: ring index the fence is associated with
 *
 * Writes a fence value to memory or a scratch register (all asics).
 */
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			*drv->cpu_addr = cpu_to_le32(seq);
		}
	} else {
		WREG32(drv->scratch_reg, seq);
	}
}

/**
 * radeon_fence_read - read a fence value
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Reads a fence value from memory or a scratch register (all asics).
 * Returns the value of the fence read from memory or register.
 */
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	struct radeon_fence_driver *drv = &rdev->fence_drv[ring];
	u32 seq = 0;

	if (likely(rdev->wb.enabled || !drv->scratch_reg)) {
		if (drv->cpu_addr) {
			seq = le32_to_cpu(*drv->cpu_addr);
		} else {
			seq = lower_32_bits(atomic64_read(&drv->last_seq));
		}
	} else {
		seq = RREG32(drv->scratch_reg);
	}
	return seq;
}

/**
 * radeon_fence_emit - emit a fence on the requested ring
 *
 * @rdev: radeon_device pointer
 * @fence: radeon fence object
 * @ring: ring index the fence is associated with
 *
 * Emits a fence command on the requested ring (all asics).
 * Returns 0 on success, -ENOMEM on failure.
 */
int radeon_fence_emit(struct radeon_device *rdev,
		      struct radeon_fence **fence,
		      int ring)
{
	/* we are protected by the ring emission mutex */
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->seq = ++rdev->fence_drv[ring].sync_seq[ring];
	(*fence)->ring = ring;
	radeon_fence_ring_emit(rdev, ring, *fence);
	trace_radeon_fence_emit(rdev->ddev, (*fence)->seq);
	return 0;
}

/**
 * radeon_fence_process - process a fence
 *
 * @rdev: radeon_device pointer
 * @ring: ring index the fence is associated with
 *
 * Checks the current fence value and wakes the fence queue
 * if the sequence number has increased (all asics).
 */
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	uint64_t seq, last_seq, last_emitted;
	unsigned count_loop = 0;
	bool wake = false;

	/* Note there is a scenario here for an infinite loop but it's
	 * very unlikely to happen. For it to happen, the current polling
	 * process needs to be interrupted by another process that
	 * updates last_seq between the atomic read and the xchg of the
	 * current process.
	 *
	 * Moreover, for this to become an infinite loop, new fences
	 * would have to signal continuously, i.e. radeon_fence_read
	 * needs to return a different value each time for both the
	 * currently polling process and the process that updates
	 * last_seq between its atomic read and xchg. In addition, the
	 * value the other process sets as last_seq must be higher than
	 * the seq value we just read, which means the current process
	 * has to be interrupted after radeon_fence_read and before the
	 * atomic xchg.
	 *
	 * To be even safer, we count the number of times we loop and
	 * bail out after 10 iterations, accepting the fact that we
	 * might have temporarily set last_seq not to the true last
	 * seq but to an older one.
	 */
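	/* Worked example of the 32 bit to 64 bit extension done below
	 * (made-up values): with last_seq = 0x00000001fffffff0 and
	 * last_emitted = 0x0000000200000010, a hardware readback of
	 * 0x0000000f first extends to 0x000000010000000f, which is
	 * below last_seq, so the lower 32 bits must have wrapped and
	 * the upper 32 bits are taken from last_emitted instead,
	 * yielding 0x000000020000000f.
	 */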
	last_seq = atomic64_read(&rdev->fence_drv[ring].last_seq);
	do {
		last_emitted = rdev->fence_drv[ring].sync_seq[ring];
		seq = radeon_fence_read(rdev, ring);
		seq |= last_seq & 0xffffffff00000000LL;
		if (seq < last_seq) {
			seq &= 0xffffffff;
			seq |= last_emitted & 0xffffffff00000000LL;
		}

		if (seq <= last_seq || seq > last_emitted) {
			break;
		}
		/* If we loop over, we don't want to return without
		 * checking if a fence is signaled, as it means that the
		 * seq we just read is different from the previous one.
		 */
		wake = true;
		last_seq = seq;
		if ((count_loop++) > 10) {
			/* We looped too many times; leave, accepting
			 * that we might have set an older fence seq
			 * than the real last seq signaled by the hw.
			 */
			break;
		}
	} while (atomic64_xchg(&rdev->fence_drv[ring].last_seq, seq) > seq);

	if (wake)
		wake_up_all(&rdev->fence_queue);
}

/**
 * radeon_fence_destroy - destroy a fence
 *
 * @kref: fence kref
 *
 * Frees the fence object (all asics).
 */
static void radeon_fence_destroy(struct kref *kref)
{
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	kfree(fence);
}

/**
 * radeon_fence_seq_signaled - check if a fence sequence number has signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence number
 * @ring: ring index the fence is associated with
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if the fence has signaled (current fence value
 * is >= requested value) or false if it has not (current fence
 * value is < the requested value).  Helper function for
 * radeon_fence_signaled().
 */
static bool radeon_fence_seq_signaled(struct radeon_device *rdev,
				      u64 seq, unsigned ring)
{
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	/* poll new last sequence at least once */
	radeon_fence_process(rdev, ring);
	if (atomic64_read(&rdev->fence_drv[ring].last_seq) >= seq) {
		return true;
	}
	return false;
}

/**
 * radeon_fence_signaled - check if a fence has signaled
 *
 * @fence: radeon fence object
 *
 * Check if the requested fence has signaled (all asics).
 * Returns true if the fence has signaled or false if it has not.
 */
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	if (!fence) {
		return true;
	}
	if (fence->seq == RADEON_FENCE_SIGNALED_SEQ) {
		return true;
	}
	if (radeon_fence_seq_signaled(fence->rdev, fence->seq, fence->ring)) {
		fence->seq = RADEON_FENCE_SIGNALED_SEQ;
		return true;
	}
	return false;
}

/**
 * radeon_fence_any_seq_signaled - check if any sequence number is signaled
 *
 * @rdev: radeon device pointer
 * @seq: sequence numbers
 *
 * Check if the last signaled fence sequence number is >= the requested
 * sequence number (all asics).
 * Returns true if any has signaled (current value is >= requested value)
 * or false if it has not. Helper function for radeon_fence_wait_seq.
 */
static bool radeon_fence_any_seq_signaled(struct radeon_device *rdev, u64 *seq)
{
	unsigned i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (seq[i] && radeon_fence_seq_signaled(rdev, seq[i], i))
			return true;
	}
	return false;
}

/**
 * radeon_fence_wait_seq - wait for a specific sequence number
 *
 * @rdev: radeon device pointer
 * @target_seq: sequence number(s) we want to wait for
 * @intr: use interruptible sleep
 * @lock_ring: whether the ring should be locked or not
 *
 * Wait for the requested sequence number(s) to be written by any ring
 * (all asics).  Sequence number array is indexed by ring id.
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the sequence number.  Helper function
 * for radeon_fence_wait_*().
 * Returns 0 if the sequence number has passed, error for all other cases.
 * -EDEADLK is returned when a GPU lockup has been detected.
 */
static int radeon_fence_wait_seq(struct radeon_device *rdev, u64 *target_seq,
				 bool intr, bool lock_ring)
{
	uint64_t last_seq[RADEON_NUM_RINGS];
	bool signaled;
	int i, r;

	while (!radeon_fence_any_seq_signaled(rdev, target_seq)) {

		/* Save current sequence values, used to check for GPU lockups */
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;

			last_seq[i] = atomic64_read(&rdev->fence_drv[i].last_seq);
			trace_radeon_fence_wait_begin(rdev->ddev, target_seq[i]);
			radeon_irq_kms_sw_irq_get(rdev, i);
		}

		if (intr) {
			r = wait_event_interruptible_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
		} else {
			r = wait_event_timeout(rdev->fence_queue, (
				(signaled = radeon_fence_any_seq_signaled(rdev, target_seq))
				 || rdev->needs_reset), RADEON_FENCE_JIFFIES_TIMEOUT);
		}

		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			if (!target_seq[i])
				continue;

			radeon_irq_kms_sw_irq_put(rdev, i);
			trace_radeon_fence_wait_end(rdev->ddev, target_seq[i]);
		}

		if (unlikely(r < 0))
			return r;

		if (unlikely(!signaled)) {
			if (rdev->needs_reset)
				return -EDEADLK;

			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r)
				continue;

			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;

				if (last_seq[i] != atomic64_read(&rdev->fence_drv[i].last_seq))
					break;
			}

			if (i != RADEON_NUM_RINGS)
				continue;

			if (lock_ring)
				mutex_lock(&rdev->ring_lock);

			for (i = 0; i < RADEON_NUM_RINGS; ++i) {
				if (!target_seq[i])
					continue;

				if (radeon_ring_is_lockup(rdev, i, &rdev->ring[i]))
					break;
			}

			if (i < RADEON_NUM_RINGS) {
				/* good news, we believe it's a lockup */
				dev_warn(rdev->dev, "GPU lockup (waiting for "
					 "0x%016llx last fence id 0x%016llx on"
					 " ring %d)\n",
					 target_seq[i], last_seq[i], i);

				/* remember that we need a reset */
				rdev->needs_reset = true;
				if (lock_ring)
					mutex_unlock(&rdev->ring_lock);
				wake_up_all(&rdev->fence_queue);
				return -EDEADLK;
			}

			if (lock_ring)
				mutex_unlock(&rdev->ring_lock);
		}
	}
	return 0;
}

/**
 * radeon_fence_wait - wait for a fence to signal
 *
 * @fence: radeon fence object
 * @intr: use interruptible sleep
 *
 * Wait for the requested fence to signal (all asics).
 * @intr selects whether to use interruptible (true) or non-interruptible
 * (false) sleep when waiting for the fence.
 * Returns 0 if the fence has passed, error for all other cases.
 */
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence: %p!\n", fence);
		return -EINVAL;
	}

	seq[fence->ring] = fence->seq;
	if (seq[fence->ring] == RADEON_FENCE_SIGNALED_SEQ)
		return 0;

	r = radeon_fence_wait_seq(fence->rdev, seq, intr, true);
	if (r)
		return r;

	fence->seq = RADEON_FENCE_SIGNALED_SEQ;
	return 0;
}

/**
 * radeon_fence_wait_any - wait for a fence to signal on any ring
 *
 * @rdev: radeon device pointer
 * @fences: radeon fence object(s)
 * @intr: use interruptible sleep
 *
 * Wait for any requested fence to signal (all asics).  Fence
 * array is indexed by ring id.  @intr selects whether to use
 * interruptible (true) or non-interruptible (false) sleep when
 * waiting for the fences. Used by the suballocator.
 * Returns 0 if any fence has passed, error for all other cases.
 */
int radeon_fence_wait_any(struct radeon_device *rdev,
			  struct radeon_fence **fences,
			  bool intr)
{
	uint64_t seq[RADEON_NUM_RINGS];
	unsigned i, num_rings = 0;
	int r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		seq[i] = 0;

		if (!fences[i]) {
			continue;
		}

		seq[i] = fences[i]->seq;
		++num_rings;

		/* test if something was already signaled */
		if (seq[i] == RADEON_FENCE_SIGNALED_SEQ)
			return 0;
	}

	/* nothing to wait for? */
	if (num_rings == 0)
		return -ENOENT;

	r = radeon_fence_wait_seq(rdev, seq, intr, true);
	if (r) {
		return r;
	}
	return 0;
}
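
/*
 * Example use of radeon_fence_wait_any(), as a sketch (gfx_fence and
 * dma_fence are hypothetical fences previously returned by
 * radeon_fence_emit(); unused ring slots stay NULL):
 *
 *	struct radeon_fence *fences[RADEON_NUM_RINGS] = {};
 *	int r;
 *
 *	fences[RADEON_RING_TYPE_GFX_INDEX] = gfx_fence;
 *	fences[R600_RING_TYPE_DMA_INDEX] = dma_fence;
 *	r = radeon_fence_wait_any(rdev, fences, true);
 */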

/**
 * radeon_fence_wait_next_locked - wait for the next fence to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for the next fence on the requested ring to signal (all asics).
 * Returns 0 if the next fence has passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_next_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};

	seq[ring] = atomic64_read(&rdev->fence_drv[ring].last_seq) + 1ULL;
	if (seq[ring] >= rdev->fence_drv[ring].sync_seq[ring]) {
		/* nothing to wait for, last_seq is
		 * already the last emitted fence */
		return -ENOENT;
	}
	return radeon_fence_wait_seq(rdev, seq, false, false);
}

/**
 * radeon_fence_wait_empty_locked - wait for all fences to signal
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Wait for all fences on the requested ring to signal (all asics).
 * Returns 0 if the fences have passed, error for all other cases.
 * Caller must hold ring lock.
 */
int radeon_fence_wait_empty_locked(struct radeon_device *rdev, int ring)
{
	uint64_t seq[RADEON_NUM_RINGS] = {};
	int r;

	seq[ring] = rdev->fence_drv[ring].sync_seq[ring];
	if (!seq[ring])
		return 0;

	r = radeon_fence_wait_seq(rdev, seq, false, false);
	if (r) {
		if (r == -EDEADLK)
			return -EDEADLK;

		dev_err(rdev->dev, "error waiting for ring[%d] to become idle (%d)\n",
			ring, r);
	}
	return 0;
}

/**
 * radeon_fence_ref - take a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Take a reference on a fence (all asics).
 * Returns the fence.
 */
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

/**
 * radeon_fence_unref - remove a ref on a fence
 *
 * @fence: radeon fence object
 *
 * Remove a reference on a fence (all asics).
 */
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

/**
 * radeon_fence_count_emitted - get the count of emitted fences
 *
 * @rdev: radeon device pointer
 * @ring: ring index the fence is associated with
 *
 * Get the number of fences emitted on the requested ring (all asics).
 * Returns the number of emitted fences on the ring.  Used by the
 * dynpm code to track ring activity.
 */
unsigned radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	uint64_t emitted;

	/* We are not protected by ring lock when reading the last sequence
	 * but it's ok to report slightly wrong fence count here.
	 */
	radeon_fence_process(rdev, ring);
	emitted = rdev->fence_drv[ring].sync_seq[ring]
		- atomic64_read(&rdev->fence_drv[ring].last_seq);
	/* to avoid 32 bits wrap around */
	if (emitted > 0x10000000) {
		emitted = 0x10000000;
	}
	return (unsigned)emitted;
}
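
/*
 * Sketch of how the dynpm code can use the count above to gauge ring
 * activity (the threshold of 3 is made up for illustration):
 *
 *	if (radeon_fence_count_emitted(rdev, ring) > 3)
 *		... consider the ring busy, keep clocks up ...
 */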

/**
 * radeon_fence_need_sync - do we need a semaphore
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Check if the fence needs to be synced against another ring
 * (all asics).  If so, we need to emit a semaphore.
 * Returns true if we need to sync with another ring, false if
 * not.
 */
bool radeon_fence_need_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *fdrv;

	if (!fence) {
		return false;
	}

	if (fence->ring == dst_ring) {
		return false;
	}

	/* we are protected by the ring mutex */
	fdrv = &fence->rdev->fence_drv[dst_ring];
	if (fence->seq <= fdrv->sync_seq[fence->ring]) {
		return false;
	}

	return true;
}

/**
 * radeon_fence_note_sync - record the sync point
 *
 * @fence: radeon fence object
 * @dst_ring: which ring to check against
 *
 * Note the sequence number at which point the fence will
 * be synced with the requested ring (all asics).
 */
void radeon_fence_note_sync(struct radeon_fence *fence, int dst_ring)
{
	struct radeon_fence_driver *dst, *src;
	unsigned i;

	if (!fence) {
		return;
	}

	if (fence->ring == dst_ring) {
		return;
	}

	/* we are protected by the ring mutex */
	src = &fence->rdev->fence_drv[fence->ring];
	dst = &fence->rdev->fence_drv[dst_ring];
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (i == dst_ring) {
			continue;
		}
		dst->sync_seq[i] = max(dst->sync_seq[i], src->sync_seq[i]);
	}
}
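
/*
 * Sketch of how the two helpers above work together when ring dst_ring
 * consumes buffers protected by @fence (the semaphore emission in the
 * middle is only hinted at; see radeon_semaphore.c for real call sites):
 *
 *	if (radeon_fence_need_sync(fence, dst_ring)) {
 *		... emit a semaphore wait on dst_ring ...
 *		radeon_fence_note_sync(fence, dst_ring);
 *	}
 */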

/**
 * radeon_fence_driver_start_ring - make the fence driver
 * ready for use on the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Make the fence driver ready for processing (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has.
 * Returns 0 for success, errors for failure.
 */
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	uint64_t index;
	int r;

	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event || !radeon_ring_supports_scratch_reg(rdev, &rdev->ring[ring])) {
		rdev->fence_drv[ring].scratch_reg = 0;
		if (ring != R600_RING_TYPE_UVD_INDEX) {
			index = R600_WB_EVENT_OFFSET + ring * 4;
			rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
			rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr +
							 index;

		} else {
			/* put fence directly behind firmware */
			index = ALIGN(rdev->uvd_fw->size, 8);
			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
		}

	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
		rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
		rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	}
	radeon_fence_write(rdev, atomic64_read(&rdev->fence_drv[ring].last_seq), ring);
	rdev->fence_drv[ring].initialized = true;
	dev_info(rdev->dev, "fence driver on ring %d use gpu addr 0x%016llx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	return 0;
}

/**
 * radeon_fence_driver_init_ring - init the fence driver
 * for the requested ring.
 *
 * @rdev: radeon device pointer
 * @ring: ring index to start the fence driver on
 *
 * Init the fence driver for the requested ring (all asics).
 * Helper function for radeon_fence_driver_init().
 */
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	int i;

	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		rdev->fence_drv[ring].sync_seq[i] = 0;
	atomic64_set(&rdev->fence_drv[ring].last_seq, 0);
	rdev->fence_drv[ring].initialized = false;
}

/**
 * radeon_fence_driver_init - init the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Init the fence driver for all possible rings (all asics).
 * Not all asics have all rings, so each asic will only
 * start the fence driver on the rings it has using
 * radeon_fence_driver_start_ring().
 * Returns 0 for success.
 */
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	int ring;

	init_waitqueue_head(&rdev->fence_queue);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

/**
 * radeon_fence_driver_fini - tear down the fence driver
 * for all possible rings.
 *
 * @rdev: radeon device pointer
 *
 * Tear down the fence driver for all possible rings (all asics).
 */
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	int ring, r;

	mutex_lock(&rdev->ring_lock);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		r = radeon_fence_wait_empty_locked(rdev, ring);
		if (r) {
			/* no need to trigger GPU reset as we are unloading */
			radeon_fence_driver_force_completion(rdev);
		}
		wake_up_all(&rdev->fence_queue);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		rdev->fence_drv[ring].initialized = false;
	}
	mutex_unlock(&rdev->ring_lock);
}

/**
 * radeon_fence_driver_force_completion - force all fence waiters to complete
 *
 * @rdev: radeon device pointer
 *
 * In case of GPU reset failure make sure no process keeps waiting on a fence
 * that will never complete.
 */
void radeon_fence_driver_force_completion(struct radeon_device *rdev)
{
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_write(rdev, rdev->fence_drv[ring].sync_seq[ring], ring);
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	int i, j;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%016llx\n",
			   (unsigned long long)atomic64_read(&rdev->fence_drv[i].last_seq));
		seq_printf(m, "Last emitted        0x%016llx\n",
			   rdev->fence_drv[i].sync_seq[i]);

		for (j = 0; j < RADEON_NUM_RINGS; ++j) {
			if (i != j && rdev->fence_drv[j].initialized)
				seq_printf(m, "Last sync to ring %d 0x%016llx\n",
					   j, rdev->fence_drv[i].sync_seq[j]);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}