/*
 * Copyright 2009 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 *    Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_trace.h"

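/* Write a fence sequence number to the fence driver's backing store
 * for @ring: the writeback page when writeback is enabled, otherwise
 * the scratch register.
 */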
static void radeon_fence_write(struct radeon_device *rdev, u32 seq, int ring)
{
	if (rdev->wb.enabled) {
		*rdev->fence_drv[ring].cpu_addr = cpu_to_le32(seq);
	} else {
		WREG32(rdev->fence_drv[ring].scratch_reg, seq);
	}
}

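/* Read back the last fence sequence number signaled on @ring, from
 * the writeback page when enabled, otherwise from the scratch
 * register.
 */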
static u32 radeon_fence_read(struct radeon_device *rdev, int ring)
{
	u32 seq = 0;

	if (rdev->wb.enabled) {
		seq = le32_to_cpu(*rdev->fence_drv[ring].cpu_addr);
	} else {
		seq = RREG32(rdev->fence_drv[ring].scratch_reg);
	}
	return seq;
}

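/**
 * radeon_fence_emit - emit a fence command on the requested ring
 * @rdev: radeon device
 * @fence: fence to emit
 *
 * Assigns the next sequence number, emits the fence command on the
 * ring and moves the fence onto the ring's emitted list.  Does
 * nothing if the fence was already emitted.
 */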
int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
{
	unsigned long irq_flags;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (fence->emitted) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence->seq = atomic_add_return(1, &rdev->fence_drv[fence->ring].seq);
	radeon_fence_ring_emit(rdev, fence->ring, fence);
	trace_radeon_fence_emit(rdev->ddev, fence->seq);
	fence->emitted = true;
	/* are we the first fence on a previously idle ring? */
	if (list_empty(&rdev->fence_drv[fence->ring].emitted)) {
		rdev->fence_drv[fence->ring].last_activity = jiffies;
	}
	list_move_tail(&fence->list, &rdev->fence_drv[fence->ring].emitted);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

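/* Check which fences have signaled on @ring and move them from the
 * emitted list to the signaled list.  Caller must hold
 * rdev->fence_lock for writing.  Returns true if any waiters should
 * be woken up.
 */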
static bool radeon_fence_poll_locked(struct radeon_device *rdev, int ring)
{
	struct radeon_fence *fence;
	struct list_head *i, *n;
	uint32_t seq;
	bool wake = false;

	seq = radeon_fence_read(rdev, ring);
	if (seq == rdev->fence_drv[ring].last_seq)
		return false;

	rdev->fence_drv[ring].last_seq = seq;
	rdev->fence_drv[ring].last_activity = jiffies;

	n = NULL;
	list_for_each(i, &rdev->fence_drv[ring].emitted) {
		fence = list_entry(i, struct radeon_fence, list);
		if (fence->seq == seq) {
			n = i;
			break;
		}
	}
	/* all fences previous to this one are considered signaled */
	if (n) {
		i = n;
		do {
			n = i->prev;
			list_move_tail(i, &rdev->fence_drv[ring].signaled);
			fence = list_entry(i, struct radeon_fence, list);
			fence->signaled = true;
			i = n;
		} while (i != &rdev->fence_drv[ring].emitted);
		wake = true;
	}
	return wake;
}

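/* kref release callback: unlink the fence, free its semaphore (if
 * any) and the fence itself.  Do not call directly; use
 * radeon_fence_unref() instead.
 */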
static void radeon_fence_destroy(struct kref *kref)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;

	fence = container_of(kref, struct radeon_fence, kref);
	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	list_del(&fence->list);
	fence->emitted = false;
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	if (fence->semaphore)
		radeon_semaphore_free(fence->rdev, fence->semaphore);
	kfree(fence);
}

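/**
 * radeon_fence_create - allocate a fence object for @ring
 * @rdev: radeon device
 * @fence: resulting fence object
 * @ring: ring index the fence will be emitted on
 *
 * The fence is not emitted until radeon_fence_emit() is called.
 * Returns 0 on success, -ENOMEM if allocation fails.
 *
 * Typical usage (a sketch, error handling elided):
 *
 *	struct radeon_fence *fence;
 *	radeon_fence_create(rdev, &fence, RADEON_RING_TYPE_GFX_INDEX);
 *	... build and submit the command stream ...
 *	radeon_fence_emit(rdev, fence);
 *	radeon_fence_wait(fence, false);
 *	radeon_fence_unref(&fence);
 */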
int radeon_fence_create(struct radeon_device *rdev,
			struct radeon_fence **fence,
			int ring)
{
	*fence = kmalloc(sizeof(struct radeon_fence), GFP_KERNEL);
	if ((*fence) == NULL) {
		return -ENOMEM;
	}
	kref_init(&((*fence)->kref));
	(*fence)->rdev = rdev;
	(*fence)->emitted = false;
	(*fence)->signaled = false;
	(*fence)->seq = 0;
	(*fence)->ring = ring;
	(*fence)->semaphore = NULL;
	INIT_LIST_HEAD(&(*fence)->list);
	return 0;
}

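/* Check whether @fence has signaled without blocking.  A NULL fence
 * counts as signaled, and all fences are reported signaled while the
 * device is shutting down.
 */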
bool radeon_fence_signaled(struct radeon_fence *fence)
{
	unsigned long irq_flags;
	bool signaled = false;

	if (!fence)
		return true;

	write_lock_irqsave(&fence->rdev->fence_lock, irq_flags);
	signaled = fence->signaled;
	/* if we are shutting down, report all fences as signaled */
	if (fence->rdev->shutdown) {
		signaled = true;
	}
	if (!fence->emitted) {
		WARN(1, "Querying an unemitted fence : %p !\n", fence);
		signaled = true;
	}
	if (!signaled) {
		radeon_fence_poll_locked(fence->rdev, fence->ring);
		signaled = fence->signaled;
	}
	write_unlock_irqrestore(&fence->rdev->fence_lock, irq_flags);
	return signaled;
}

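/**
 * radeon_fence_wait - wait for a fence to signal
 * @fence: fence to wait for
 * @intr: use an interruptible sleep
 *
 * Sleeps until the fence signals, waking up periodically to check
 * for GPU lockups.  Returns 0 once the fence has signaled, -EINVAL
 * for a NULL fence, -EDEADLK if a GPU lockup was detected, or a
 * negative error code if the interruptible wait was interrupted.
 */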
int radeon_fence_wait(struct radeon_fence *fence, bool intr)
{
	struct radeon_device *rdev;
	unsigned long irq_flags, timeout;
	u32 seq;
	int i, r;
	bool signaled;

	if (fence == NULL) {
		WARN(1, "Querying an invalid fence : %p !\n", fence);
		return -EINVAL;
	}

	rdev = fence->rdev;
	signaled = radeon_fence_signaled(fence);
	while (!signaled) {
		read_lock_irqsave(&rdev->fence_lock, irq_flags);
		timeout = jiffies - RADEON_FENCE_JIFFIES_TIMEOUT;
		if (time_after(rdev->fence_drv[fence->ring].last_activity, timeout)) {
			/* the normal case, timeout is somewhere before last_activity */
			timeout = rdev->fence_drv[fence->ring].last_activity - timeout;
		} else {
			/* either jiffies wrapped around, or no fence was signaled in the last 500ms;
			 * just wait for the minimum amount and then check for a lockup */
			timeout = 1;
		}
		/* save current sequence value used to check for GPU lockups */
		seq = rdev->fence_drv[fence->ring].last_seq;
		read_unlock_irqrestore(&rdev->fence_lock, irq_flags);

		trace_radeon_fence_wait_begin(rdev->ddev, seq);
		radeon_irq_kms_sw_irq_get(rdev, fence->ring);
		if (intr) {
			r = wait_event_interruptible_timeout(
				rdev->fence_drv[fence->ring].queue,
				(signaled = radeon_fence_signaled(fence)), timeout);
		} else {
			r = wait_event_timeout(
				rdev->fence_drv[fence->ring].queue,
				(signaled = radeon_fence_signaled(fence)), timeout);
		}
		radeon_irq_kms_sw_irq_put(rdev, fence->ring);
		if (unlikely(r < 0)) {
			return r;
		}
		trace_radeon_fence_wait_end(rdev->ddev, seq);

		if (unlikely(!signaled)) {
			/* we were interrupted for some reason and fence
			 * isn't signaled yet, resume waiting */
			if (r) {
				continue;
			}

			write_lock_irqsave(&rdev->fence_lock, irq_flags);
			/* check if sequence value has changed since last_activity */
			if (seq != rdev->fence_drv[fence->ring].last_seq) {
				write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
				continue;
			}

			/* change sequence value on all rings, so nobody else thinks there is a lockup */
			for (i = 0; i < RADEON_NUM_RINGS; ++i)
				rdev->fence_drv[i].last_seq -= 0x10000;

			rdev->fence_drv[fence->ring].last_activity = jiffies;
			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);

			if (radeon_ring_is_lockup(rdev, fence->ring, &rdev->ring[fence->ring])) {

				/* good news we believe it's a lockup */
				printk(KERN_WARNING "GPU lockup (waiting for 0x%08X last fence id 0x%08X)\n",
				     fence->seq, seq);

				/* mark the ring as not ready any more */
				rdev->ring[fence->ring].ready = false;
				return -EDEADLK;
			}
		}
	}
	return 0;
}

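/* Wait for the oldest emitted fence on @ring to signal.  Returns
 * -EBUSY if the ring is not ready, -ENOENT if no fence is emitted.
 */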
int radeon_fence_wait_next(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (!rdev->ring[ring].ready) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return -EBUSY;
	}
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return -ENOENT;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.next,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

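/* Wait until all fences emitted on @ring have signaled, by waiting on
 * the newest emitted fence.  Returns -EBUSY if the ring is not ready,
 * 0 once the ring is idle.
 */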
int radeon_fence_wait_empty(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	struct radeon_fence *fence;
	int r;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (!rdev->ring[ring].ready) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return -EBUSY;
	}
	if (list_empty(&rdev->fence_drv[ring].emitted)) {
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}
	fence = list_entry(rdev->fence_drv[ring].emitted.prev,
			   struct radeon_fence, list);
	radeon_fence_ref(fence);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;
}

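/* Take an additional reference on @fence. */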
struct radeon_fence *radeon_fence_ref(struct radeon_fence *fence)
{
	kref_get(&fence->kref);
	return fence;
}

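/* Drop a reference on *@fence and clear the pointer; the fence is
 * freed once the last reference is gone.
 */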
void radeon_fence_unref(struct radeon_fence **fence)
{
	struct radeon_fence *tmp = *fence;

	*fence = NULL;
	if (tmp) {
		kref_put(&tmp->kref, radeon_fence_destroy);
	}
}

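/* Called from the interrupt handlers when a fence IRQ fires: process
 * newly signaled fences on @ring and wake up any waiters.
 */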
void radeon_fence_process(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	bool wake;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	wake = radeon_fence_poll_locked(rdev, ring);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	if (wake) {
		wake_up_all(&rdev->fence_drv[ring].queue);
	}
}

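/* Return the number of fences still outstanding on @ring, capped at
 * 3; callers only need to know "none, a few, or many".
 */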
int radeon_fence_count_emitted(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	int not_processed = 0;

	read_lock_irqsave(&rdev->fence_lock, irq_flags);
	if (!rdev->fence_drv[ring].initialized) {
		read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		return 0;
	}

	if (!list_empty(&rdev->fence_drv[ring].emitted)) {
		struct list_head *ptr;
		list_for_each(ptr, &rdev->fence_drv[ring].emitted) {
			/* count up to 3, that's enough info */
			if (++not_processed >= 3)
				break;
		}
	}
	read_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return not_processed;
}

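/**
 * radeon_fence_driver_start_ring - make the fence driver ready for @ring
 * @rdev: radeon device
 * @ring: ring index
 *
 * Picks the location the GPU writes fence values to: an offset in the
 * writeback page when events are used, otherwise a scratch register.
 * Returns 0 on success, or an error if no scratch register is
 * available.
 */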
int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
{
	unsigned long irq_flags;
	uint64_t index;
	int r;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
	if (rdev->wb.use_event) {
		rdev->fence_drv[ring].scratch_reg = 0;
		index = R600_WB_EVENT_OFFSET + ring * 4;
	} else {
		r = radeon_scratch_get(rdev, &rdev->fence_drv[ring].scratch_reg);
		if (r) {
			dev_err(rdev->dev, "fence failed to get scratch register\n");
			write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
			return r;
		}
		index = RADEON_WB_SCRATCH_OFFSET +
			rdev->fence_drv[ring].scratch_reg -
			rdev->scratch.reg_base;
	}
	rdev->fence_drv[ring].cpu_addr = &rdev->wb.wb[index/4];
	rdev->fence_drv[ring].gpu_addr = rdev->wb.gpu_addr + index;
	radeon_fence_write(rdev, atomic_read(&rdev->fence_drv[ring].seq), ring);
	rdev->fence_drv[ring].initialized = true;
	DRM_INFO("fence driver on ring %d use gpu addr 0x%08Lx and cpu addr 0x%p\n",
		 ring, rdev->fence_drv[ring].gpu_addr, rdev->fence_drv[ring].cpu_addr);
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	return 0;
}

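/* Reset the per-ring fence driver state; the ring is not usable until
 * radeon_fence_driver_start_ring() has been called for it.
 */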
static void radeon_fence_driver_init_ring(struct radeon_device *rdev, int ring)
{
	rdev->fence_drv[ring].scratch_reg = -1;
	rdev->fence_drv[ring].cpu_addr = NULL;
	rdev->fence_drv[ring].gpu_addr = 0;
	atomic_set(&rdev->fence_drv[ring].seq, 0);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].emitted);
	INIT_LIST_HEAD(&rdev->fence_drv[ring].signaled);
	init_waitqueue_head(&rdev->fence_drv[ring].queue);
	rdev->fence_drv[ring].initialized = false;
}

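/* Initialize the fence driver state for all rings and register the
 * fence debugfs file.  A debugfs failure is only reported, not fatal.
 */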
int radeon_fence_driver_init(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int ring;

	write_lock_irqsave(&rdev->fence_lock, irq_flags);
	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		radeon_fence_driver_init_ring(rdev, ring);
	}
	write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
	if (radeon_debugfs_fence_init(rdev)) {
		dev_err(rdev->dev, "fence debugfs file creation failed\n");
	}
	return 0;
}

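/* Tear down the fence driver: wait for all outstanding fences on each
 * initialized ring, wake any remaining waiters and release the
 * scratch registers.
 */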
void radeon_fence_driver_fini(struct radeon_device *rdev)
{
	unsigned long irq_flags;
	int ring;

	for (ring = 0; ring < RADEON_NUM_RINGS; ring++) {
		if (!rdev->fence_drv[ring].initialized)
			continue;
		radeon_fence_wait_empty(rdev, ring);
		wake_up_all(&rdev->fence_drv[ring].queue);
		write_lock_irqsave(&rdev->fence_lock, irq_flags);
		radeon_scratch_free(rdev, rdev->fence_drv[ring].scratch_reg);
		write_unlock_irqrestore(&rdev->fence_lock, irq_flags);
		rdev->fence_drv[ring].initialized = false;
	}
}


/*
 * Fence debugfs
 */
#if defined(CONFIG_DEBUG_FS)
static int radeon_debugfs_fence_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_fence *fence;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!rdev->fence_drv[i].initialized)
			continue;

		seq_printf(m, "--- ring %d ---\n", i);
		seq_printf(m, "Last signaled fence 0x%08X\n",
			   radeon_fence_read(rdev, i));
		if (!list_empty(&rdev->fence_drv[i].emitted)) {
			fence = list_entry(rdev->fence_drv[i].emitted.prev,
					   struct radeon_fence, list);
			seq_printf(m, "Last emitted fence %p with 0x%08X\n",
				   fence,  fence->seq);
		}
	}
	return 0;
}

static struct drm_info_list radeon_debugfs_fence_list[] = {
	{"radeon_fence_info", &radeon_debugfs_fence_info, 0, NULL},
};
#endif

int radeon_debugfs_fence_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, radeon_debugfs_fence_list, 1);
#else
	return 0;
#endif
}