#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>	/* kallsyms_lookup(), KSYM_SYMBOL_LEN */
#include <linux/slab.h>		/* kzalloc() */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);
static int sys_refcount_enter;
static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);

extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[];

static struct syscall_metadata **syscalls_metadata;

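/*
 * Resolve a syscall function address to its metadata entry.  kallsyms
 * turns the address into a symbol name, which is then matched against
 * every record in the __syscalls_metadata section.
 */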
static struct syscall_metadata *find_syscall_meta(unsigned long syscall)
{
	struct syscall_metadata *start;
	struct syscall_metadata *stop;
	char str[KSYM_SYMBOL_LEN];

	start = (struct syscall_metadata *)__start_syscalls_metadata;
	stop = (struct syscall_metadata *)__stop_syscalls_metadata;
	kallsyms_lookup(syscall, NULL, NULL, NULL, str);

	for ( ; start < stop; start++) {
		/*
		 * Only compare after the "sys" prefix. Archs that use
		 * syscall wrappers may have syscalls symbols aliases prefixed
		 * with "SyS" instead of "sys", leading to an unwanted
		 * mismatch.
		 */
		if (start->name && !strcmp(start->name + 3, str + 3))
			return start;
	}
	return NULL;
}

static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
	if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
		return NULL;

	return syscalls_metadata[nr];
}

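/* Map a syscall name ("sys_foo") back to its number; -1 if unknown. */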
static int syscall_name_to_nr(const char *name)
{
	int i;

	if (!syscalls_metadata)
		return -1;

	for (i = 0; i < NR_syscalls; i++) {
		if (syscalls_metadata[i]) {
			if (!strcmp(syscalls_metadata[i]->name, name))
				return i;
		}
	}
	return -1;
}

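/*
 * Output handler for syscall entry events, producing lines such as
 * "sys_read(fd: 3, buf: 7fff9c8, count: 400)".  With the "verbose"
 * trace option set, the argument types are printed as well.
 */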
enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_enter *trace;
	struct syscall_metadata *entry;
	int i, ret, syscall;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry)
		goto end;

	if (entry->enter_event->id != ent->type) {
		WARN_ON_ONCE(1);
		goto end;
	}

	ret = trace_seq_printf(s, "%s(", entry->name);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	for (i = 0; i < entry->nb_args; i++) {
		/* parameter types */
		if (trace_flags & TRACE_ITER_VERBOSE) {
			ret = trace_seq_printf(s, "%s ", entry->types[i]);
			if (!ret)
				return TRACE_TYPE_PARTIAL_LINE;
		}
		/* parameter values */
		ret = trace_seq_printf(s, "%s: %lx%s", entry->args[i],
				       trace->args[i],
				       i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return TRACE_TYPE_PARTIAL_LINE;
	}

	ret = trace_seq_putc(s, ')');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

end:
	ret = trace_seq_putc(s, '\n');
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

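/* Output handler for syscall exit events, e.g. "sys_read -> 0x400". */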
enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags)
{
	struct trace_seq *s = &iter->seq;
	struct trace_entry *ent = iter->ent;
	struct syscall_trace_exit *trace;
	int syscall;
	struct syscall_metadata *entry;
	int ret;

	trace = (typeof(trace))ent;
	syscall = trace->nr;
	entry = syscall_nr_to_meta(syscall);

	if (!entry) {
		trace_seq_printf(s, "\n");
		return TRACE_TYPE_HANDLED;
	}

	if (entry->exit_event->id != ent->type) {
		WARN_ON_ONCE(1);
		return TRACE_TYPE_UNHANDLED;
	}

	ret = trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
				trace->ret);
	if (!ret)
		return TRACE_TYPE_PARTIAL_LINE;

	return TRACE_TYPE_HANDLED;
}

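/*
 * __bad_type_size() is deliberately never defined.  SYSCALL_FIELD()
 * references it whenever the declared type and the actual struct
 * member differ in size, so a mismatch surfaces as a link-time error
 * instead of a silently wrong format description.
 */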
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)					\
	sizeof(type) != sizeof(trace.name) ?				\
		__bad_type_size() :					\
		#type, #name, offsetof(typeof(trace), name),		\
		sizeof(trace.name), is_signed_type(type)

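/*
 * Emit the human-readable field description for a syscall entry
 * event, i.e. the contents of its "format" file in the tracing
 * debugfs directory, which userspace tools parse to decode records.
 */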
int syscall_enter_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int i;
	int ret;
	struct syscall_metadata *entry = call->data;
	struct syscall_trace_enter trace;
	int offset = offsetof(struct syscall_trace_enter, args);

	ret = trace_seq_printf(s, "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr));
	if (!ret)
		return 0;

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "\tfield:%s %s;", entry->types[i],
				        entry->args[i]);
		if (!ret)
			return 0;
		ret = trace_seq_printf(s, "\toffset:%d;\tsize:%zu;"
				       "\tsigned:%u;\n", offset,
				       sizeof(unsigned long),
				       is_signed_type(unsigned long));
		if (!ret)
			return 0;
		offset += sizeof(unsigned long);
	}

	trace_seq_puts(s, "\nprint fmt: \"");
	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, "%s: 0x%%0%zulx%s", entry->args[i],
				        sizeof(unsigned long),
					i == entry->nb_args - 1 ? "" : ", ");
		if (!ret)
			return 0;
	}
	trace_seq_putc(s, '"');

	for (i = 0; i < entry->nb_args; i++) {
		ret = trace_seq_printf(s, ", ((unsigned long)(REC->%s))",
				       entry->args[i]);
		if (!ret)
			return 0;
	}

	return trace_seq_putc(s, '\n');
}

int syscall_exit_format(struct ftrace_event_call *call, struct trace_seq *s)
{
	int ret;
	struct syscall_trace_exit trace;

	ret = trace_seq_printf(s,
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n"
			       "\tfield:%s %s;\toffset:%zu;\tsize:%zu;"
			       "\tsigned:%u;\n",
			       SYSCALL_FIELD(int, nr),
			       SYSCALL_FIELD(long, ret));
	if (!ret)
		return 0;

	return trace_seq_printf(s, "\nprint fmt: \"0x%%lx\", REC->ret\n");
}

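/*
 * Register the event's fields with the trace core so that filter
 * expressions can be applied to syscall entry events.  Every argument
 * is defined as an unsigned long regardless of its real type.
 */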
int syscall_enter_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_enter trace;
	struct syscall_metadata *meta = call->data;
	int ret;
	int i;
	int offset = offsetof(typeof(trace), args);

	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	for (i = 0; i < meta->nb_args; i++) {
		ret = trace_define_field(call, meta->types[i],
					 meta->args[i], offset,
					 sizeof(unsigned long), 0,
					 FILTER_OTHER);
		offset += sizeof(unsigned long);
	}

	return ret;
}

int syscall_exit_define_fields(struct ftrace_event_call *call)
{
	struct syscall_trace_exit trace;
	int ret;

	ret = trace_define_common_fields(call);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
	if (ret)
		return ret;

	ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
				 FILTER_OTHER);

	return ret;
}

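/*
 * Probe attached to the sys_enter tracepoint.  Records the syscall
 * number and arguments into the ring buffer; the event size is
 * computed per syscall since each one takes a different number of
 * arguments.
 */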
void ftrace_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_trace_enter *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int size;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->enter_event->id, size, 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

	if (!filter_current_check_discard(buffer, sys_data->enter_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

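/*
 * Probe attached to the sys_exit tracepoint.  Records the syscall
 * number and its return value into the ring buffer.
 */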
void ftrace_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_trace_exit *entry;
	struct syscall_metadata *sys_data;
	struct ring_buffer_event *event;
	struct ring_buffer *buffer;
	int syscall_nr;

	syscall_nr = syscall_get_nr(current, regs);
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	event = trace_current_buffer_lock_reserve(&buffer,
			sys_data->exit_event->id, sizeof(*entry), 0, 0);
	if (!event)
		return;

	entry = ring_buffer_event_data(event);
	entry->nr = syscall_nr;
	entry->ret = syscall_get_return_value(current, regs);

	if (!filter_current_check_discard(buffer, sys_data->exit_event,
					  entry, event))
		trace_current_buffer_unlock_commit(buffer, event, 0, 0);
}

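/*
 * Enable entry tracing for one syscall event.  All syscalls share a
 * single tracepoint probe: it is registered when the first event is
 * enabled, and the refcount tracks how many events are using it.
 * Per-syscall filtering happens via the enabled_enter_syscalls bitmap.
 */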
int reg_event_syscall_enter(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_enter)
		ret = register_trace_sys_enter(ftrace_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point\n");
	} else {
		set_bit(num, enabled_enter_syscalls);
		sys_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_enter(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_enter--;
	clear_bit(num, enabled_enter_syscalls);
	if (!sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

int reg_event_syscall_exit(struct ftrace_event_call *call)
{
	int ret = 0;
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;
	mutex_lock(&syscall_trace_lock);
	if (!sys_refcount_exit)
		ret = register_trace_sys_exit(ftrace_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point\n");
	} else {
		set_bit(num, enabled_exit_syscalls);
		sys_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_event_syscall_exit(struct ftrace_event_call *call)
{
	int num;

	num = ((struct syscall_metadata *)call->data)->syscall_nr;
	if (num < 0 || num >= NR_syscalls)
		return;
	mutex_lock(&syscall_trace_lock);
	sys_refcount_exit--;
	clear_bit(num, enabled_exit_syscalls);
	if (!sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

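/* Event registration callback: allocate a trace event id for the call. */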
int init_syscall_trace(struct ftrace_event_call *call)
{
	int id;

	id = register_ftrace_event(call->event);
	if (!id)
		return -ENODEV;
	call->id = id;
	INIT_LIST_HEAD(&call->fields);
	return 0;
}

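/*
 * Build the syscall number -> metadata lookup table at boot by
 * resolving each syscall table entry's address to its metadata.
 */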
int __init init_ftrace_syscalls(void)
{
	struct syscall_metadata *meta;
	unsigned long addr;
	int i;

	syscalls_metadata = kzalloc(sizeof(*syscalls_metadata) *
					NR_syscalls, GFP_KERNEL);
	if (!syscalls_metadata) {
		WARN_ON(1);
		return -ENOMEM;
	}

	for (i = 0; i < NR_syscalls; i++) {
		addr = arch_syscall_addr(i);
		meta = find_syscall_meta(addr);
		if (!meta)
			continue;

		meta->syscall_nr = i;
		syscalls_metadata[i] = meta;
	}

	return 0;
}
core_initcall(init_ftrace_syscalls);

#ifdef CONFIG_EVENT_PROFILE

static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
static int sys_prof_refcount_enter;
static int sys_prof_refcount_exit;

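/*
 * Perf (profiling) probe for syscall entry.  The record is built in a
 * per-cpu scratch buffer, with interrupts disabled to protect the
 * buffer and to keep the RCU read side for perf_trace_buf, and then
 * submitted through perf_tp_event().
 */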
static void prof_syscall_enter(struct pt_regs *regs, long id)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_enter *rec;
	unsigned long flags;
	char *trace_buf;
	char *raw_data;
	int syscall_nr;
	int rctx;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	/* syscall_get_nr() returns -1 if this is not a syscall trap */
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* get the size after alignment with the u32 buffer size field */
	size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
	size = ALIGN(size + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		      "profile buffer not large enough"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	cpu = smp_processor_id();

	trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_enter *) raw_data;
	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->enter_event->id;
	rec->nr = syscall_nr;
	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
			       (unsigned long *)&rec->args);
	perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(flags);
}

int reg_prof_syscall_enter(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_enter)
		ret = register_trace_sys_enter(prof_syscall_enter);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall entry trace point\n");
	} else {
		set_bit(num, enabled_prof_enter_syscalls);
		sys_prof_refcount_enter++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_prof_syscall_enter(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_enter--;
	clear_bit(num, enabled_prof_enter_syscalls);
	if (!sys_prof_refcount_enter)
		unregister_trace_sys_enter(prof_syscall_enter);
	mutex_unlock(&syscall_trace_lock);
}

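/* Perf (profiling) probe for syscall exit; mirrors prof_syscall_enter(). */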
static void prof_syscall_exit(struct pt_regs *regs, long ret)
{
	struct syscall_metadata *sys_data;
	struct syscall_trace_exit *rec;
	unsigned long flags;
	int syscall_nr;
	char *trace_buf;
	char *raw_data;
	int rctx;
	int size;
	int cpu;

	syscall_nr = syscall_get_nr(current, regs);
	/* syscall_get_nr() returns -1 if this is not a syscall trap */
	if (syscall_nr < 0)
		return;
	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
		return;

	sys_data = syscall_nr_to_meta(syscall_nr);
	if (!sys_data)
		return;

	/* This could probably be computed at build time */
	size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
	size -= sizeof(u32);

	/*
	 * This should never trigger, but stay paranoid about future
	 * changes.  Ideally this check would happen at build time.
	 */
	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
		"exit event has grown above profile buffer size"))
		return;

	/* Protect the per cpu buffer, begin the rcu read side */
	local_irq_save(flags);

	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto end_recursion;

	cpu = smp_processor_id();

	trace_buf = rcu_dereference(perf_trace_buf);

	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* zero the dead bytes from align to not leak stack to user */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	rec = (struct syscall_trace_exit *)raw_data;

	tracing_generic_entry_update(&rec->ent, 0, 0);
	rec->ent.type = sys_data->exit_event->id;
	rec->nr = syscall_nr;
	rec->ret = syscall_get_return_value(current, regs);

	perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size);

end:
	perf_swevent_put_recursion_context(rctx);
end_recursion:
	local_irq_restore(flags);
}

int reg_prof_syscall_exit(char *name)
{
	int ret = 0;
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return -ENOSYS;

	mutex_lock(&syscall_trace_lock);
	if (!sys_prof_refcount_exit)
		ret = register_trace_sys_exit(prof_syscall_exit);
	if (ret) {
		pr_info("event trace: Could not activate "
			"syscall exit trace point\n");
	} else {
		set_bit(num, enabled_prof_exit_syscalls);
		sys_prof_refcount_exit++;
	}
	mutex_unlock(&syscall_trace_lock);
	return ret;
}

void unreg_prof_syscall_exit(char *name)
{
	int num;

	num = syscall_name_to_nr(name);
	if (num < 0 || num >= NR_syscalls)
		return;

	mutex_lock(&syscall_trace_lock);
	sys_prof_refcount_exit--;
	clear_bit(num, enabled_prof_exit_syscalls);
	if (!sys_prof_refcount_exit)
		unregister_trace_sys_exit(prof_syscall_exit);
	mutex_unlock(&syscall_trace_lock);
}

#endif