/*
 * Copyright (C) 2006 Jens Axboe <axboe@kernel.dk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */
#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/time.h>
#include <asm/uaccess.h>

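/*
 * Global trace sequence number; bumped on each trace start so that
 * trace_note_tsk() announces every task once per trace
 */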
static unsigned int blktrace_seq __read_mostly = 1;

/*
 * Send out a notify message.
 */
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
		       const void *data, size_t len)
{
	struct blk_io_trace *t;

	t = relay_reserve(bt->rchan, sizeof(*t) + len);
	if (t) {
		const int cpu = smp_processor_id();

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->time = ktime_to_ns(ktime_get());
		t->device = bt->dev;
		t->action = action;
		t->pid = pid;
		t->cpu = cpu;
		t->pdu_len = len;
		memcpy((void *) t + sizeof(*t), data, len);
	}
}

/*
 * Send out a notify for this process, if we haven't done so since a trace
 * started
 */
static void trace_note_tsk(struct blk_trace *bt, struct task_struct *tsk)
{
	tsk->btrace_seq = blktrace_seq;
	trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm, sizeof(tsk->comm));
}

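/*
 * Record the current wallclock time in the trace, so that user space
 * can correlate the (monotonic) per-event timestamps with real time
 */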
static void trace_note_time(struct blk_trace *bt)
{
	struct timespec now;
	unsigned long flags;
	u32 words[2];

	getnstimeofday(&now);
	words[0] = now.tv_sec;
	words[1] = now.tv_nsec;

	local_irq_save(flags);
	trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words));
	local_irq_restore(flags);
}

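/*
 * Record a free-form message in the trace, allowing callers to
 * annotate the event stream with their own notes
 */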
void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
{
	int n;
	va_list args;
	static char bt_msg_buf[BLK_TN_MAX_MSG];

	va_start(args, fmt);
	n = vscnprintf(bt_msg_buf, BLK_TN_MAX_MSG, fmt, args);
	va_end(args);

	trace_note(bt, 0, BLK_TN_MESSAGE, bt_msg_buf, n);
}
EXPORT_SYMBOL_GPL(__trace_note_message);

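/*
 * Apply the action mask, sector range and pid filters configured at
 * setup time; returns 1 if the event should be dropped, 0 if it
 * should be logged
 */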
static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
			 pid_t pid)
{
	if (((bt->act_mask << BLK_TC_SHIFT) & what) == 0)
		return 1;
	if (sector < bt->start_lba || sector > bt->end_lba)
		return 1;
	if (bt->pid && pid != bt->pid)
		return 1;

	return 0;
}

/*
 * Data direction bit lookup
 */
static u32 ddir_act[2] __read_mostly = {
	BLK_TC_ACT(BLK_TC_READ), BLK_TC_ACT(BLK_TC_WRITE)
};

/*
 * Bio action bits of interest
 */
static u32 bio_act[9] __read_mostly = {
	0, BLK_TC_ACT(BLK_TC_BARRIER), BLK_TC_ACT(BLK_TC_SYNC), 0,
	BLK_TC_ACT(BLK_TC_AHEAD), 0, 0, 0, BLK_TC_ACT(BLK_TC_META)
};

/*
 * More could be added as needed; take care to adjust the shift in each
 * macro below so that every flag maps to its own index into bio_act[]
 */
#define trace_barrier_bit(rw)	\
	(((rw) & (1 << BIO_RW_BARRIER)) >> (BIO_RW_BARRIER - 0))
#define trace_sync_bit(rw)	\
	(((rw) & (1 << BIO_RW_SYNC)) >> (BIO_RW_SYNC - 1))
#define trace_ahead_bit(rw)	\
	(((rw) & (1 << BIO_RW_AHEAD)) << (2 - BIO_RW_AHEAD))
#define trace_meta_bit(rw)	\
	(((rw) & (1 << BIO_RW_META)) >> (BIO_RW_META - 3))

/*
 * The worker for the various blk_add_trace*() types. Fills out a
 * blk_io_trace structure and places it in a per-cpu subbuffer.
 */
void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
		     int rw, u32 what, int error, int pdu_len, void *pdu_data)
{
	struct task_struct *tsk = current;
	struct blk_io_trace *t;
	unsigned long flags;
	unsigned long *sequence;
	pid_t pid;
	int cpu;

	if (unlikely(bt->trace_state != Blktrace_running))
		return;

	what |= ddir_act[rw & WRITE];
	what |= bio_act[trace_barrier_bit(rw)];
	what |= bio_act[trace_sync_bit(rw)];
	what |= bio_act[trace_ahead_bit(rw)];
	what |= bio_act[trace_meta_bit(rw)];

	pid = tsk->pid;
	if (unlikely(act_log_check(bt, what, sector, pid)))
		return;

	/*
	 * A word about the locking here - we disable interrupts to reserve
	 * some space in the relay per-cpu buffer, to prevent an irq
	 * from coming in and stepping on our toes. Once reserved, it's
	 * enough to get preemption disabled to prevent read of this data
	 * before we are through filling it. get_cpu()/put_cpu() does this
	 * for us
	 */
	local_irq_save(flags);

	if (unlikely(tsk->btrace_seq != blktrace_seq))
		trace_note_tsk(bt, tsk);

	t = relay_reserve(bt->rchan, sizeof(*t) + pdu_len);
	if (t) {
		cpu = smp_processor_id();
		sequence = per_cpu_ptr(bt->sequence, cpu);

		t->magic = BLK_IO_TRACE_MAGIC | BLK_IO_TRACE_VERSION;
		t->sequence = ++(*sequence);
		t->time = ktime_to_ns(ktime_get());
		t->sector = sector;
		t->bytes = bytes;
		t->action = what;
		t->pid = pid;
		t->device = bt->dev;
		t->cpu = cpu;
		t->error = error;
		t->pdu_len = pdu_len;

		if (pdu_len)
			memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);
	}

	local_irq_restore(flags);
}

EXPORT_SYMBOL_GPL(__blk_add_trace);

static struct dentry *blk_tree_root;
static DEFINE_MUTEX(blk_tree_mutex);
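/*
 * All traces share a single "block" directory in debugfs. root_users
 * counts the per-device subdirectories, so the root can be torn down
 * when the last user goes away.
 */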
static unsigned int root_users;

static inline void blk_remove_root(void)
{
	if (blk_tree_root) {
		debugfs_remove(blk_tree_root);
		blk_tree_root = NULL;
	}
}

static void blk_remove_tree(struct dentry *dir)
{
	mutex_lock(&blk_tree_mutex);
	debugfs_remove(dir);
	if (--root_users == 0)
		blk_remove_root();
	mutex_unlock(&blk_tree_mutex);
}

static struct dentry *blk_create_tree(const char *blk_name)
{
	struct dentry *dir = NULL;
	int created = 0;

	mutex_lock(&blk_tree_mutex);

	if (!blk_tree_root) {
		blk_tree_root = debugfs_create_dir("block", NULL);
		if (!blk_tree_root)
			goto err;
		created = 1;
	}

	dir = debugfs_create_dir(blk_name, blk_tree_root);
	if (dir)
		root_users++;
	else {
		/* Delete root only if we created it */
		if (created)
			blk_remove_root();
	}

err:
	mutex_unlock(&blk_tree_mutex);
	return dir;
}

static void blk_trace_cleanup(struct blk_trace *bt)
{
	relay_close(bt->rchan);
	debugfs_remove(bt->dropped_file);
	blk_remove_tree(bt->dir);
	free_percpu(bt->sequence);
	kfree(bt);
}

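/*
 * Detach the trace from the queue, and free it if it is no longer
 * running
 */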
int blk_trace_remove(struct request_queue *q)
{
	struct blk_trace *bt;

	bt = xchg(&q->blk_trace, NULL);
	if (!bt)
		return -EINVAL;

	if (bt->trace_state == Blktrace_setup ||
	    bt->trace_state == Blktrace_stopped)
		blk_trace_cleanup(bt);

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_remove);
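/*
 * The "dropped" debugfs file reports how many events were lost because
 * a subbuffer was full, see blk_subbuf_start_callback() below
 */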

static int blk_dropped_open(struct inode *inode, struct file *filp)
{
	filp->private_data = inode->i_private;

	return 0;
}

static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	struct blk_trace *bt = filp->private_data;
	char buf[16];

	snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));

	return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
}

static const struct file_operations blk_dropped_fops = {
	.owner =	THIS_MODULE,
	.open =		blk_dropped_open,
	.read =		blk_dropped_read,
};

/*
 * Keep track of how many times we encountered a full subbuffer, to aid
 * the user space app in telling how many lost events there were.
 */
static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
				     void *prev_subbuf, size_t prev_padding)
{
	struct blk_trace *bt;

	if (!relay_buf_full(buf))
		return 1;

	bt = buf->chan->private_data;
	atomic_inc(&bt->dropped);
	return 0;
}

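/*
 * relay hooks for creating and removing the per-cpu trace buffer
 * files in debugfs
 */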
static int blk_remove_buf_file_callback(struct dentry *dentry)
{
	debugfs_remove(dentry);
	return 0;
}

static struct dentry *blk_create_buf_file_callback(const char *filename,
						   struct dentry *parent,
						   int mode,
						   struct rchan_buf *buf,
						   int *is_global)
{
	return debugfs_create_file(filename, mode, parent, buf,
					&relay_file_operations);
}

static struct rchan_callbacks blk_relay_callbacks = {
	.subbuf_start		= blk_subbuf_start_callback,
	.create_buf_file	= blk_create_buf_file_callback,
	.remove_buf_file	= blk_remove_buf_file_callback,
};

/*
 * Setup everything required to start tracing
 */
int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
			struct blk_user_trace_setup *buts)
{
	struct blk_trace *old_bt, *bt = NULL;
	struct dentry *dir = NULL;
	int ret, i;

	if (!buts->buf_size || !buts->buf_nr)
		return -EINVAL;

	strcpy(buts->name, name);

	/*
	 * some device names contain path separators - convert the slashes
	 * to underscores so the debugfs directory name is valid
	 */
	for (i = 0; i < strlen(buts->name); i++)
		if (buts->name[i] == '/')
			buts->name[i] = '_';

	ret = -ENOMEM;
	bt = kzalloc(sizeof(*bt), GFP_KERNEL);
	if (!bt)
		goto err;

	bt->sequence = alloc_percpu(unsigned long);
	if (!bt->sequence)
		goto err;

	ret = -ENOENT;
	dir = blk_create_tree(buts->name);
	if (!dir)
		goto err;

	bt->dir = dir;
	bt->dev = dev;
	atomic_set(&bt->dropped, 0);

	ret = -EIO;
	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
						&blk_dropped_fops);
	if (!bt->dropped_file)
		goto err;

	bt->rchan = relay_open("trace", dir, buts->buf_size,
				buts->buf_nr, &blk_relay_callbacks, bt);
	if (!bt->rchan)
		goto err;

	bt->act_mask = buts->act_mask;
	if (!bt->act_mask)
		bt->act_mask = (u16) -1;

	bt->start_lba = buts->start_lba;
	bt->end_lba = buts->end_lba;
	if (!bt->end_lba)
		bt->end_lba = -1ULL;

	bt->pid = buts->pid;
	bt->trace_state = Blktrace_setup;

	ret = -EBUSY;
	old_bt = xchg(&q->blk_trace, bt);
	if (old_bt) {
		(void) xchg(&q->blk_trace, old_bt);
		goto err;
	}

	return 0;
err:
	if (dir)
		blk_remove_tree(dir);
	if (bt) {
		if (bt->dropped_file)
			debugfs_remove(bt->dropped_file);
		free_percpu(bt->sequence);
		if (bt->rchan)
			relay_close(bt->rchan);
		kfree(bt);
	}
	return ret;
}

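/*
 * ioctl entry point: copy the setup arguments in from user space, do
 * the actual setup, then copy the (updated) arguments back out
 */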
int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
		    char __user *arg)
{
	struct blk_user_trace_setup buts;
	int ret;

	ret = copy_from_user(&buts, arg, sizeof(buts));
	if (ret)
		return -EFAULT;

	ret = do_blk_trace_setup(q, name, dev, &buts);
	if (ret)
		return ret;

	if (copy_to_user(arg, &buts, sizeof(buts)))
		return -EFAULT;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_trace_setup);

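/*
 * Start or stop an existing trace. On stop, flush the relay channel
 * so that everything gathered so far is visible to user space.
 */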
int blk_trace_startstop(struct request_queue *q, int start)
{
	struct blk_trace *bt;
	int ret;

	bt = q->blk_trace;
	if (!bt)
		return -EINVAL;

	/*
	 * For starting a trace, we can transition from a setup or stopped
	 * trace. For stopping a trace, the state must be running
	 */
	ret = -EINVAL;
	if (start) {
		if (bt->trace_state == Blktrace_setup ||
		    bt->trace_state == Blktrace_stopped) {
			blktrace_seq++;
			smp_mb();
			bt->trace_state = Blktrace_running;

			trace_note_time(bt);
			ret = 0;
		}
	} else {
		if (bt->trace_state == Blktrace_running) {
			bt->trace_state = Blktrace_stopped;
			relay_flush(bt->rchan);
			ret = 0;
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(blk_trace_startstop);

/**
 * blk_trace_ioctl - handle the ioctls associated with tracing
 * @bdev:	the block device
 * @cmd:	the ioctl cmd
 * @arg:	the argument data, if any
 *
 **/
int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
{
	struct request_queue *q;
	int ret, start = 0;
	char b[BDEVNAME_SIZE];

	q = bdev_get_queue(bdev);
	if (!q)
		return -ENXIO;

	mutex_lock(&bdev->bd_mutex);

	switch (cmd) {
	case BLKTRACESETUP:
		bdevname(bdev, b);
		ret = blk_trace_setup(q, b, bdev->bd_dev, arg);
		break;
	case BLKTRACESTART:
		start = 1;
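		/* fall through */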
	case BLKTRACESTOP:
		ret = blk_trace_startstop(q, start);
		break;
	case BLKTRACETEARDOWN:
		ret = blk_trace_remove(q);
		break;
	default:
		ret = -ENOTTY;
		break;
	}

	mutex_unlock(&bdev->bd_mutex);
	return ret;
}

/**
 * blk_trace_shutdown - stop and cleanup trace structures
 * @q:    the request queue associated with the device
 *
 **/
void blk_trace_shutdown(struct request_queue *q)
{
	if (q->blk_trace) {
		blk_trace_startstop(q, 0);
		blk_trace_remove(q);
	}
}