/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

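/* Write a fence sequence number for @ring: to the writeback page when
 * writeback is enabled, otherwise to the ring's scratch register.
 */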
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	if (rdev->wb.enabled) {
		*rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
	}
}

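/* Read back the last fence sequence number for @ring from the writeback
 * page or, if writeback is disabled, from the ring's scratch register.
 */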
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	u32 seq = 0;

	if (rdev->wb.enabled) {
		seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
	} else {
		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
	}
	return seq;
}

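/* Assign the next sequence number to @fence and emit it on its ring.
 * If the ring is not running, the sequence is written directly so the
 * fence reads back as already signaled.
 */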
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (fence->emitted) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
	if (!rdev->ring[fence->ring].ready)
		/* FIXME: cp is not running, assume everything is done right
		 * away
		 */
		radeon_fence_write(rdev, fence->seq, fence->ring);
	else
		radeon_fence_ring_emit(rdev, fence->ring, fence);

	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	fence->emitted = true;
	list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

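/* Poll the fence value for @ring and move every fence up to the last
 * signaled sequence number from the emitted list to the signaled list.
 * Also maintains the lockup-detection timeout.  Caller must hold
 * rdev->fence_lock.  Returns true if any waiter should be woken.
 */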
static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;
	unsigned long cjiffies;

	seq = radeon_fence_read(rdev, ring);
	if (seq != rdev->fence_drv[ring].last_seq) {
		rdev->fence_drv[ring].last_seq = seq;
		rdev->fence_drv[ring].last_jiffies = jiffies;
		rdev->fence_drv[ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
	} else {
		cjiffies = jiffies;
		if (time_after(cjiffies, rdev->fence_drv[ring].last_jiffies)) {
			cjiffies -= rdev->fence_drv[ring].last_jiffies;
			if (time_after(rdev->fence_drv[ring].last_timeout, cjiffies)) {
				/* update the timeout */
				rdev->fence_drv[ring].last_timeout -= cjiffies;
			} else {
				/* the 500ms timeout has elapsed, we should test
				 * for GPU lockup
				 */
				rdev->fence_drv[ring].last_timeout = 1;
			}
		} else {
			/* wrap around, update last jiffies; we will just wait
			 * a little longer
			 */
			rdev->fence_drv[ring].last_jiffies = cjiffies;
		}
		return false;
	}
	n = NULL;
	list_for_each(i, &rdev->fence_drv[ring].emitted) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_move_tail(i, &rdev->fence_drv[ring].signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv[ring].emitted);
		wake = true;
	}
	return wake;
}

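/* kref release callback: unlink the fence, free its semaphore if any
 * and free the fence itself.
 */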
static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	list_del(&fence->list);
	fence->emitted = false;
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	if (fence->semaphore)
		radeon_semaphore_free(fence->rdev, fence->semaphore);
	kfree(fence);
}

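/* Allocate and initialize a fence for @ring and add it to the ring's
 * created list.  The sequence number is assigned later, at emit time.
 */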
int radeon_fence_create(struct radeon_device *rdev,
			struct radeon_fence **fence,
			int ring)
{
	unsigned long irq_flags;

	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emitted = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	(*fence)->ring = ring;
	(*fence)->semaphore = NULL;
	INIT_LIST_HEAD(&(*fence)->list);

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	list_add_tail(&(*fence)->list, &rdev->fence_drv[ring].created);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

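/* Check whether @fence has signaled, polling the ring if necessary.
 * NULL fences, GPU lockup and driver shutdown are all reported as
 * signaled.
 */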
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	if (fence->rdev->gpu_lockup)
		return true;

	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down, report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emitted) {
		WARN(1, "Querying an unemitted fence : %p !\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev, fence->ring);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	return signaled;
}

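/* Wait for @fence to signal, optionally interruptible.  If the wait
 * times out without the sequence number advancing, the GPU is assumed
 * to be locked up and a GPU reset is attempted.
 */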
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout;
	u32 seq;
	int r;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return 0;
	}
	rdev = fence->rdev;
	if (radeon_fence_signaled(fence)) {
		return 0;
	}
	timeout = rdev->fence_drv[fence->ring].last_timeout;
retry:
	/* save current sequence used to check for GPU lockup */
	seq = rdev->fence_drv[fence->ring].last_seq;
	trace_radeon_fence_wait_begin(rdev->ddev, seq);
	if (intr) {
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
		r = wait_event_interruptible_timeout(rdev->fence_drv[fence->ring].queue,
				radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev, fence->ring);
		if (unlikely(r < 0)) {
			return r;
		}
	} else {
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
		r = wait_event_timeout(rdev->fence_drv[fence->ring].queue,
			 radeon_fence_signaled(fence), timeout);
		radeon_irq_kms_sw_irq_put(rdev, fence->ring);
	}
	trace_radeon_fence_wait_end(rdev->ddev, seq);
	if (unlikely(!radeon_fence_signaled(fence))) {
		/* we were interrupted for some reason and fence
		 * isn't signaled yet, resume wait
		 */
		if (r) {
			timeout = r;
			goto retry;
		}
		/* don't protect read access to rdev->fence_drv[fence->ring].last_seq
		 * if we're experiencing a lockup the value doesn't change
		 */
		if (seq == rdev->fence_drv[fence->ring].last_seq &&
		    radeon_gpu_is_lockup(rdev, &rdev->ring[fence->ring])) {
			/* good news we believe it's a lockup */
			printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
			     fence->seq, seq);
			/* FIXME: what should we do? marking everyone
			 * as signaled for now
			 */
			rdev->gpu_lockup = true;
			r = radeon_gpu_reset(rdev);
			if (r)
				return r;
			radeon_fence_write(rdev, fence->seq, fence->ring);
			rdev->gpu_lockup = false;
		}
		timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		rdev->fence_drv[fence->ring].last_timeout = RADEON_FENCE_JIFFIES_TIMEOUT;
		rdev->fence_drv[fence->ring].last_jiffies = jiffies;
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		goto retry;
	}
	return 0;
}

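/* Wait for the oldest emitted fence on @ring to signal.  Returns 0 if
 * there is nothing to wait for or if the GPU is locked up.
 */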
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

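/* Wait for the most recently emitted fence on @ring to signal, i.e.
 * wait for the ring to go idle.  Returns 0 if there is nothing to wait
 * for or if the GPU is locked up.
 */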
int radeon_fence_wait_last(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	if (rdev->gpu_lockup) {
		return 0;
	}
	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

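/* Poll @ring for newly signaled fences and wake up anyone waiting on
 * the ring's fence queue.
 */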
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev, ring);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv[ring].queue);
	}
}

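/* Return the number of emitted but not yet processed fences on @ring,
 * capped at 3 (enough to gauge how busy the ring is).
 */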
int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	int not_processed = 0;

	read_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (!rdev->fence_drv[ring].initialized)
		return 0;

	if (!list_empty(&rdev->fence_drv[ring].emitted)) {
		struct list_head *ptr;
		list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
			/* count up to 3, that's enough info */
			if (++not_processed >= 3)
				break;
		}
	}
	read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return not_processed;
}

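/* Pick where fence values for @ring are stored (writeback event slot or
 * scratch register), record the CPU/GPU addresses and seed it with the
 * current sequence number.
 */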
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	uint64_t index;
	int r;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
	rdev->fence_drv[ring].initialized = true;
	DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

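/* Reset the per-ring fence bookkeeping: sequence counter, fence lists
 * and wait queue.
 */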
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	atomic_set(&rdev->fence_drv[ring].seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].created);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
	init_waitqueue_head(&rdev->fence_drv[ring].queue);
	rdev->fence_drv[ring].initialized = false;
}

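/* Initialize the software fence state for every ring and register the
 * debugfs file.  The per-ring hardware setup is done later by
 * radeon_fence_driver_start_ring().
 */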
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int ring;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

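/* Tear down fence bookkeeping: wait for each initialized ring to go
 * idle, wake any remaining waiters and release the scratch registers.
 */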
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_last(rdev, ring);
		wake_up_all(&rdev->fence_drv[ring].queue);
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		rdev->fence_drv[ring].initialized = false;
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%08X\n",
			   radeon_fence_read(rdev, i));
		if (!list_empty(&rdev->fence_drv[i].emitted)) {
			fence = list_entry(rdev->fence_drv[i].emitted.prev,
					   struct radeon_fence, list);
			seq_printf(m, "Last emitted fence %p with 0x%08X\n",
				   fence,  fence->seq);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}