/*
 * taskstats.c - Export per-task statistics to userland
 *
 * Copyright (C) Shailabh Nagar, IBM Corp. 2006
 *           (C) Balbir Singh,   IBM Corp. 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/taskstats_kern.h>
#include <linux/tsacct_kern.h>
#include <linux/delayacct.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <net/genetlink.h>
#include <asm/atomic.h>

/*
 * Maximum length of a cpumask that can be specified in
 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
 */
#define TASKSTATS_CPUMASK_MAXLEN	(100+6*NR_CPUS)

static DEFINE_PER_CPU(__u32, taskstats_seqnum) = { 0 };
static int family_registered;
kmem_cache_t *taskstats_cache;

static struct genl_family family = {
	.id		= GENL_ID_GENERATE,
	.name		= TASKSTATS_GENL_NAME,
	.version	= TASKSTATS_GENL_VERSION,
	.maxattr	= TASKSTATS_CMD_ATTR_MAX,
};

static struct nla_policy taskstats_cmd_get_policy[TASKSTATS_CMD_ATTR_MAX+1]
__read_mostly = {
	[TASKSTATS_CMD_ATTR_PID]  = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 },
	[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING },
	[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },};

struct listener {
	struct list_head list;
	pid_t pid;
	char valid;
};

struct listener_list {
	struct rw_semaphore sem;
	struct list_head list;
};
static DEFINE_PER_CPU(struct listener_list, listener_array);

enum actions {
	REGISTER,
	DEREGISTER,
	CPU_DONT_CARE
};

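/*
 * Allocate a new sk_buff and start a genetlink message for command @cmd
 * with room for @size bytes of payload.  @info is NULL for unsolicited
 * exit messages, in which case a per-cpu sequence number is used.
 */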
static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp,
			void **replyp, size_t size)
{
	struct sk_buff *skb;
	void *reply;

	/*
	 * If new attributes are added, please revisit this allocation
	 */
	skb = nlmsg_new(genlmsg_total_size(size), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	if (!info) {
		int seq = get_cpu_var(taskstats_seqnum)++;
		put_cpu_var(taskstats_seqnum);

		reply = genlmsg_put(skb, 0, seq,
				family.id, 0, 0,
				cmd, family.version);
	} else
		reply = genlmsg_put(skb, info->snd_pid, info->snd_seq,
				family.id, 0, 0,
				cmd, family.version);
	if (reply == NULL) {
		nlmsg_free(skb);
		return -EINVAL;
	}

	*skbp = skb;
	*replyp = reply;
	return 0;
}

/*
 * Send taskstats data in @skb to listener with nl_pid @pid
 */
static int send_reply(struct sk_buff *skb, pid_t pid)
{
	struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
	void *reply = genlmsg_data(genlhdr);
	int rc;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return rc;
	}

	return genlmsg_unicast(skb, pid);
}

/*
 * Send taskstats data in @skb to listeners registered for @cpu's exit data
 */
static void send_cpu_listeners(struct sk_buff *skb, unsigned int cpu)
{
	struct genlmsghdr *genlhdr = nlmsg_data((struct nlmsghdr *)skb->data);
	struct listener_list *listeners;
	struct listener *s, *tmp;
	struct sk_buff *skb_next, *skb_cur = skb;
	void *reply = genlmsg_data(genlhdr);
	int rc, delcount = 0;

	rc = genlmsg_end(skb, reply);
	if (rc < 0) {
		nlmsg_free(skb);
		return;
	}

	rc = 0;
	listeners = &per_cpu(listener_array, cpu);
	down_read(&listeners->sem);
	list_for_each_entry(s, &listeners->list, list) {
		skb_next = NULL;
		if (!list_is_last(&s->list, &listeners->list)) {
			skb_next = skb_clone(skb_cur, GFP_KERNEL);
			if (!skb_next)
				break;
		}
		rc = genlmsg_unicast(skb_cur, s->pid);
		if (rc == -ECONNREFUSED) {
			s->valid = 0;
			delcount++;
		}
		skb_cur = skb_next;
	}
	up_read(&listeners->sem);

	if (skb_cur)
		nlmsg_free(skb_cur);

	if (!delcount)
		return;

	/* Delete invalidated entries */
	down_write(&listeners->sem);
	list_for_each_entry_safe(s, tmp, &listeners->list, list) {
		if (!s->valid) {
			list_del(&s->list);
			kfree(s);
		}
	}
	up_write(&listeners->sem);
}

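/*
 * Fill @stats with the accounting data of the task identified by @pid,
 * or of @pidtsk if the caller already has the task_struct.
 */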
static int fill_pid(pid_t pid, struct task_struct *pidtsk,
		struct taskstats *stats)
{
	int rc = 0;
	struct task_struct *tsk = pidtsk;

	if (!pidtsk) {
		read_lock(&tasklist_lock);
		tsk = find_task_by_pid(pid);
		if (!tsk) {
			read_unlock(&tasklist_lock);
			return -ESRCH;
		}
		get_task_struct(tsk);
		read_unlock(&tasklist_lock);
	} else
		get_task_struct(tsk);

	/*
	 * Each accounting subsystem adds calls to its functions to
	 * fill in relevant parts of struct taskstats as follows
	 *
	 *	per-task-foo(stats, tsk);
	 */

	delayacct_add_tsk(stats, tsk);

	/* fill in basic acct fields */
	stats->version = TASKSTATS_VERSION;
	bacct_add_tsk(stats, tsk);

	/* fill in extended acct fields */
	xacct_add_tsk(stats, tsk);

	/* Define err: label here if needed */
	put_task_struct(tsk);
	return rc;

}

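/*
 * Fill @stats with accounting data aggregated across the thread group
 * @tgid: stats already saved for dead threads plus the delay accounting
 * of the remaining live threads.
 */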
static int fill_tgid(pid_t tgid, struct task_struct *tgidtsk,
		struct taskstats *stats)
{
	struct task_struct *tsk, *first;
	unsigned long flags;

	/*
	 * Add additional stats from live tasks except zombie thread group
	 * leaders who are already counted with the dead tasks
	 */
	first = tgidtsk;
	if (!first) {
		read_lock(&tasklist_lock);
		first = find_task_by_pid(tgid);
		if (!first) {
			read_unlock(&tasklist_lock);
			return -ESRCH;
		}
		get_task_struct(first);
		read_unlock(&tasklist_lock);
	} else
		get_task_struct(first);

	tsk = first;
	read_lock(&tasklist_lock);
	/* Start with stats from dead tasks */
	if (first->signal) {
		spin_lock_irqsave(&first->signal->stats_lock, flags);
		if (first->signal->stats)
			memcpy(stats, first->signal->stats, sizeof(*stats));
		spin_unlock_irqrestore(&first->signal->stats_lock, flags);
	}

	do {
		if (tsk->exit_state == EXIT_ZOMBIE && thread_group_leader(tsk))
			continue;
		/*
		 * Accounting subsystem can call its functions here to
		 * fill in relevant parts of struct taskstats as follows
		 *
		 *	per-task-foo(stats, tsk);
		 */
		delayacct_add_tsk(stats, tsk);

	} while_each_thread(first, tsk);
	read_unlock(&tasklist_lock);
	stats->version = TASKSTATS_VERSION;

	/*
	 * Accounting subsystems can also add calls here to modify
	 * fields of taskstats.
	 */

	put_task_struct(first);
	return 0;
}


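/*
 * Fold the exiting task's accounting into its thread group's
 * tsk->signal->stats structure, if one has been allocated.
 */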
static void fill_tgid_exit(struct task_struct *tsk)
{
	unsigned long flags;

	spin_lock_irqsave(&tsk->signal->stats_lock, flags);
	if (!tsk->signal->stats)
		goto ret;

	/*
	 * Each accounting subsystem calls its functions here to
	 * accumulate its per-task stats for tsk into the per-tgid structure
	 *
	 *	per-task-foo(tsk->signal->stats, tsk);
	 */
	delayacct_add_tsk(tsk->signal->stats, tsk);
ret:
	spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);
	return;
}

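/*
 * Register or deregister @pid as an exit-data listener on every CPU in
 * @maskp, depending on whether @isadd is REGISTER or DEREGISTER.
 */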
static int add_del_listener(pid_t pid, cpumask_t *maskp, int isadd)
{
	struct listener_list *listeners;
	struct listener *s, *tmp;
	unsigned int cpu;
	cpumask_t mask = *maskp;

	if (!cpus_subset(mask, cpu_possible_map))
		return -EINVAL;

	if (isadd == REGISTER) {
		for_each_cpu_mask(cpu, mask) {
			s = kmalloc_node(sizeof(struct listener), GFP_KERNEL,
					 cpu_to_node(cpu));
			if (!s)
				goto cleanup;
			s->pid = pid;
			INIT_LIST_HEAD(&s->list);
			s->valid = 1;

			listeners = &per_cpu(listener_array, cpu);
			down_write(&listeners->sem);
			list_add(&s->list, &listeners->list);
			up_write(&listeners->sem);
		}
		return 0;
	}

	/* Deregister or cleanup */
cleanup:
	for_each_cpu_mask(cpu, mask) {
		listeners = &per_cpu(listener_array, cpu);
		down_write(&listeners->sem);
		list_for_each_entry_safe(s, tmp, &listeners->list, list) {
			if (s->pid == pid) {
				list_del(&s->list);
				kfree(s);
				break;
			}
		}
		up_write(&listeners->sem);
	}
	return 0;
}

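/*
 * Parse the cpulist string carried in attribute @na into @mask.
 * Returns 0 on success, 1 if the attribute is absent, or a negative errno.
 */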
static int parse(struct nlattr *na, cpumask_t *mask)
{
	char *data;
	int len;
	int ret;

	if (na == NULL)
		return 1;
	len = nla_len(na);
	if (len > TASKSTATS_CPUMASK_MAXLEN)
		return -E2BIG;
	if (len < 1)
		return -EINVAL;
	data = kmalloc(len, GFP_KERNEL);
	if (!data)
		return -ENOMEM;
	nla_strlcpy(data, na, len);
	ret = cpulist_parse(data, *mask);
	kfree(data);
	return ret;
}

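/*
 * Handle TASKSTATS_CMD_GET requests from userspace: register or deregister
 * cpumask listeners, or reply with the statistics of the requested pid/tgid.
 */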
static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
{
	int rc = 0;
	struct sk_buff *rep_skb;
	struct taskstats stats;
	void *reply;
	size_t size;
	struct nlattr *na;
	cpumask_t mask;

	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], &mask);
	if (rc < 0)
		return rc;
	if (rc == 0)
		return add_del_listener(info->snd_pid, &mask, REGISTER);

	rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], &mask);
	if (rc < 0)
		return rc;
	if (rc == 0)
		return add_del_listener(info->snd_pid, &mask, DEREGISTER);

	/*
	 * Size includes space for nested attributes
	 */
	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	memset(&stats, 0, sizeof(stats));
	rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
	if (rc < 0)
		return rc;

	if (info->attrs[TASKSTATS_CMD_ATTR_PID]) {
		u32 pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]);
		rc = fill_pid(pid, NULL, &stats);
		if (rc < 0)
			goto err;

		na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
		NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, pid);
		NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
				stats);
	} else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) {
		u32 tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]);
		rc = fill_tgid(tgid, NULL, &stats);
		if (rc < 0)
			goto err;

		na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
		NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, tgid);
		NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
				stats);
	} else {
		rc = -EINVAL;
		goto err;
	}

	nla_nest_end(rep_skb, na);

	return send_reply(rep_skb, info->snd_pid);

nla_put_failure:
	return genlmsg_cancel(rep_skb, reply);
err:
	nlmsg_free(rep_skb);
	return rc;
}

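/*
 * Allocate a taskstats structure for an exiting task, but only if at
 * least one listener is registered on the CPU the task is exiting on.
 */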
void taskstats_exit_alloc(struct taskstats **ptidstats, unsigned int *mycpu)
{
	struct listener_list *listeners;
	struct taskstats *tmp;
	/*
	 * This is the cpu on which the task is exiting currently and will
	 * be the one for which the exit event is sent, even if the cpu
	 * on which this function is running changes later.
	 */
	*mycpu = raw_smp_processor_id();

	*ptidstats = NULL;
	tmp = kmem_cache_zalloc(taskstats_cache, SLAB_KERNEL);
	if (!tmp)
		return;

	listeners = &per_cpu(listener_array, *mycpu);
	down_read(&listeners->sem);
	if (!list_empty(&listeners->list)) {
		*ptidstats = tmp;
		tmp = NULL;
	}
	up_read(&listeners->sem);
	kfree(tmp);
}

/* Send pid data out on exit */
void taskstats_exit_send(struct task_struct *tsk, struct taskstats *tidstats,
			int group_dead, unsigned int mycpu)
{
	int rc;
	struct sk_buff *rep_skb;
	void *reply;
	size_t size;
	int is_thread_group;
	struct nlattr *na;
	unsigned long flags;

	if (!family_registered || !tidstats)
		return;

	spin_lock_irqsave(&tsk->signal->stats_lock, flags);
	is_thread_group = tsk->signal->stats ? 1 : 0;
	spin_unlock_irqrestore(&tsk->signal->stats_lock, flags);

	rc = 0;
	/*
	 * Size includes space for nested attributes
	 */
	size = nla_total_size(sizeof(u32)) +
		nla_total_size(sizeof(struct taskstats)) + nla_total_size(0);

	if (is_thread_group)
		size = 2 * size;	/* PID + STATS + TGID + STATS */

	rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, &reply, size);
	if (rc < 0)
		goto ret;

	rc = fill_pid(tsk->pid, tsk, tidstats);
	if (rc < 0)
		goto err_skb;

	na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_PID);
	NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_PID, (u32)tsk->pid);
	NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
			*tidstats);
	nla_nest_end(rep_skb, na);

	if (!is_thread_group)
		goto send;

	/*
	 * tsk has/had a thread group so fill the tsk->signal->stats structure
	 * Doesn't matter if tsk is the leader or the last group member leaving
	 */
	fill_tgid_exit(tsk);
	if (!group_dead)
		goto send;

	na = nla_nest_start(rep_skb, TASKSTATS_TYPE_AGGR_TGID);
	NLA_PUT_U32(rep_skb, TASKSTATS_TYPE_TGID, (u32)tsk->tgid);
	/* No locking needed for tsk->signal->stats since group is dead */
	NLA_PUT_TYPE(rep_skb, struct taskstats, TASKSTATS_TYPE_STATS,
			*tsk->signal->stats);
	nla_nest_end(rep_skb, na);

send:
	send_cpu_listeners(rep_skb, mycpu);
	return;

nla_put_failure:
	genlmsg_cancel(rep_skb, reply);
	goto ret;
err_skb:
	nlmsg_free(rep_skb);
ret:
	return;
}

static struct genl_ops taskstats_ops = {
	.cmd		= TASKSTATS_CMD_GET,
	.doit		= taskstats_user_cmd,
	.policy		= taskstats_cmd_get_policy,
};

/* Needed early in initialization */
void __init taskstats_init_early(void)
{
	unsigned int i;

	taskstats_cache = kmem_cache_create("taskstats_cache",
						sizeof(struct taskstats),
						0, SLAB_PANIC, NULL, NULL);
	for_each_possible_cpu(i) {
		INIT_LIST_HEAD(&(per_cpu(listener_array, i).list));
		init_rwsem(&(per_cpu(listener_array, i).sem));
	}
}

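/*
 * Register the taskstats genetlink family and its TASKSTATS_CMD_GET
 * operation with the generic netlink layer.
 */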
static int __init taskstats_init(void)
{
	int rc;

	rc = genl_register_family(&family);
	if (rc)
		return rc;

	rc = genl_register_ops(&family, &taskstats_ops);
	if (rc < 0)
		goto err;

	family_registered = 1;
	return 0;
err:
	genl_unregister_family(&family);
	return rc;
}

/*
 * late initcall ensures initialization of statistics collection
 * mechanisms precedes initialization of the taskstats interface
 */
late_initcall(taskstats_init);