/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

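/* write a fence sequence value to the driver's fence location: the
 * writeback page when writeback is enabled, otherwise the scratch register */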
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	if (rdev->wb.enabled) {
		*rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
	}
}

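/* read back the last fence sequence value the GPU wrote, from the
 * writeback page when writeback is enabled, otherwise the scratch register */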
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	u32 seq = 0;

	if (rdev->wb.enabled) {
		seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
	} else {
		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
	}
	return seq;
}

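/* assign the next sequence number to a fence, emit it on its ring and
 * move it onto the ring's emitted list */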
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (fence->emitted) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
	radeon_fence_ring_emit(rdev, fence->ring, fence);
	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	fence->emitted = true;
	/* are we the first fence on a previously idle ring? */
	if (list_empty(&rdev->fence_drv[fence->ring].emitted)) {
		rdev->fence_drv[fence->ring].last_activity = jiffies;
	}
	list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

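/* called with fence_lock held: read the current fence value and move every
 * fence up to and including the matching one from the emitted list to the
 * signaled list; returns true if any waiters should be woken up */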
static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;

	seq = radeon_fence_read(rdev, ring);
	if (seq == rdev->fence_drv[ring].last_seq)
		return false;

	rdev->fence_drv[ring].last_seq = seq;
	rdev->fence_drv[ring].last_activity = jiffies;

	n = NULL;
	list_for_each(i, &rdev->fence_drv[ring].emitted) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_move_tail(i, &rdev->fence_drv[ring].signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv[ring].emitted);
		wake = true;
	}
	return wake;
}

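/* kref release callback: unlink the fence, free its semaphore if any and
 * free the fence itself */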
static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	list_del(&fence->list);
	fence->emitted = false;
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	if (fence->semaphore)
		radeon_semaphore_free(fence->rdev, fence->semaphore);
	kfree(fence);
}

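/* allocate and initialize a new fence for the given ring and add it to the
 * ring's created list */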
int radeon_fence_create(struct radeon_device *rdev,
			struct radeon_fence **fence,
			int ring)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emitted = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	(*fence)->ring = ring;
	(*fence)->semaphore = NULL;
	INIT_LIST_HEAD(&(*fence)->list);

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

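/* check whether a fence has signaled, polling the ring's fence value if
 * needed; NULL or unemitted fences and fences on a device that is shutting
 * down are reported as signaled */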
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emitted) {
		WARN(1, "Querying an unemitted fence : %p !\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev, fence->ring);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	return signaled;
}

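/* wait for a fence to signal; sleeps on the ring's wait queue with the sw
 * irq enabled and, when a timeout expires without progress, checks for a
 * GPU lockup and resets the GPU if one is detected */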
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout;
	u32 seq;
	int i, r;
	bool signaled;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	rdev = fence->rdev;
	signaled = radeon_fence_signaled(fence);
	while (!signaled) {
		read_lock_irqsave(&rdev->fence_lock, irq_flags);
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[fence->ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[fence->ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms
			 * anyway we will just wait for the minimum amount and then check for a lockup */
			timeout = 1;
		}
		/* save current sequence value used to check for GPU lockups */
		seq = rdev->fence_drv[fence->ring].last_seq;
		read_unlock_irqrestore(&rdev->fence_lock, irq_flags);

		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
		if (intr) {
			r = wait_event_interruptible_timeout(
				rdev->fence_drv[fence->ring].queue,
				(signaled = radeon_fence_signaled(fence)), timeout);
		} else {
			r = wait_event_timeout(
				rdev->fence_drv[fence->ring].queue,
				(signaled = radeon_fence_signaled(fence)), timeout);
		}
		radeon_irq_kms_sw_irq_put(rdev, fence->ring);
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			write_lock_irqsave(&rdev->fence_lock, irq_flags);
			/* check if sequence value has changed since last_activity */
			if (seq != rdev->fence_drv[fence->ring].last_seq) {
				write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
				continue;
			}

			/* change sequence value on all rings, so nobody else thinks there is a lockup */
			for (i = 0; i < RADEON_NUM_RINGS; ++i)
				rdev->fence_drv[i].last_seq -= 0x10000;
			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);

			if (radeon_ring_is_lockup(rdev, fence->ring, &rdev->ring[fence->ring])) {

				/* good news we believe it's a lockup */
				printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
				     fence->seq, seq);

				/* mark the ring as not ready any more */
				rdev->ring[fence->ring].ready = false;
				r = radeon_gpu_reset(rdev);
				if (r)
					return r;

				write_lock_irqsave(&rdev->fence_lock, irq_flags);
				rdev->fence_drv[fence->ring].last_activity = jiffies;
				write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
			}
		}
	}
	return 0;
}

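/* wait for the oldest emitted fence on a ring to signal */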
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (!rdev->ring[ring].ready) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return -EBUSY;
	}
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

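/* wait for the most recently emitted fence on a ring to signal,
 * i.e. wait for the ring to go idle */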
int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (!rdev->ring[ring].ready) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return -EBUSY;
	}
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

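/* poll the fence value for a ring and wake up waiters if new fences signaled */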
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev, ring);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv[ring].queue);
	}
}

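/* return the number of emitted but not yet processed fences on a ring,
 * counting at most 3 */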
int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	int not_processed = 0;

	read_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (!rdev->fence_drv[ring].initialized) {
		read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}

	if (!list_empty(&rdev->fence_drv[ring].emitted)) {
		struct list_head *ptr;
		list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
			/* count up to 3, that's enough info */
			if (++not_processed >= 3)
				break;
		}
	}
	read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return not_processed;
}

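/* set up the fence location for a ring (writeback event slot or a scratch
 * register) and write back the current sequence value */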
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	uint64_t index;
	int r;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
	rdev->fence_drv[ring].initialized = true;
	DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	atomic_set(&rdev->fence_drv[ring].seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
	init_waitqueue_head(&rdev->fence_drv[ring].queue);
	rdev->fence_drv[ring].initialized = false;
}

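/* initialize the software state of the fence driver for every ring and
 * register the fence debugfs file */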
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int ring;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

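/* wait for every initialized ring to go idle, then tear down the per-ring
 * fence state */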
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_last(rdev, ring);
		wake_up_all(&rdev->fence_drv[ring].queue);
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		rdev->fence_drv[ring].initialized = false;
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%08X\n",
			   radeon_fence_read(rdev, i));
		if (!list_empty(&rdev->fence_drv[i].emitted)) {
			fence = list_entry(rdev->fence_drv[i].emitted.prev,
					   struct radeon_fence, list);
			seq_printf(m, "Last emitted fence %p with 0x%08X\n",
				   fence,  fence->seq);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}