cpuacct.c
// SPDX-License-Identifier: GPL-2.0
/*
 * CPU accounting code for task groups.
 *
 * Based on the work by Paul Menage (menage@google.com) and Balbir Singh
 * (balbir@in.ibm.com).
 */
#include "sched.h"

/* Time spent by the tasks of the CPU accounting group executing in ... */
enum cpuacct_stat_index {
	CPUACCT_STAT_USER,	/* ... user mode */
	CPUACCT_STAT_SYSTEM,	/* ... kernel mode */

	CPUACCT_STAT_NSTATS,
};

static const char * const cpuacct_stat_desc[] = {
	[CPUACCT_STAT_USER] = "user",
	[CPUACCT_STAT_SYSTEM] = "system",
};

struct cpuacct_usage {
	u64	usages[CPUACCT_STAT_NSTATS];
	struct prev_cputime prev_cputime1; /* utime and stime */
	struct prev_cputime prev_cputime2; /* user and nice */
} ____cacheline_aligned;

#ifdef CONFIG_SCHED_SLI
/* Maintain various statistics */
struct cpuacct_alistats {
	u64		nr_migrations;
} ____cacheline_aligned;
#endif

/* track CPU usage of a group of tasks and its child groups */
struct cpuacct {
	struct cgroup_subsys_state	css;
	/* cpuusage holds pointer to a u64-type object on every CPU */
	struct cpuacct_usage __percpu	*cpuusage;
#ifdef CONFIG_SCHED_SLI
	struct cpuacct_alistats __percpu *alistats;
#endif
	struct kernel_cpustat __percpu	*cpustat;

	ALI_HOTFIX_RESERVE(1)
	ALI_HOTFIX_RESERVE(2)
	ALI_HOTFIX_RESERVE(3)
	ALI_HOTFIX_RESERVE(4)
};

static inline struct cpuacct *css_ca(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct cpuacct, css) : NULL;
}

/* Return CPU accounting group to which this task belongs */
static inline struct cpuacct *task_ca(struct task_struct *tsk)
{
	return css_ca(task_css(tsk, cpuacct_cgrp_id));
}

static inline struct cpuacct *parent_ca(struct cpuacct *ca)
{
	return css_ca(ca->css.parent);
}

static DEFINE_PER_CPU(struct cpuacct_usage, root_cpuacct_cpuusage);
#ifdef CONFIG_SCHED_SLI
static DEFINE_PER_CPU(struct cpuacct_alistats, root_alistats);
#endif
static struct cpuacct root_cpuacct = {
	.cpustat	= &kernel_cpustat,
	.cpuusage	= &root_cpuacct_cpuusage,
#ifdef CONFIG_SCHED_SLI
	.alistats	= &root_alistats,
#endif
};

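/* Account one migration of @tsk to its accounting group's per-CPU SLI statistics */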
void task_ca_increase_nr_migrations(struct task_struct *tsk)
{
	struct cpuacct *ca;

	rcu_read_lock();
	ca = task_ca(tsk);
	this_cpu_ptr(ca->alistats)->nr_migrations++;
	rcu_read_unlock();
}

/* Create a new CPU accounting group */
static struct cgroup_subsys_state *
cpuacct_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cpuacct *ca;
	int i;

	if (!parent_css)
		return &root_cpuacct.css;

	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
	if (!ca)
		goto out;

	ca->cpuusage = alloc_percpu(struct cpuacct_usage);
	if (!ca->cpuusage)
		goto out_free_ca;

	ca->cpustat = alloc_percpu(struct kernel_cpustat);
	if (!ca->cpustat)
		goto out_free_cpuusage;

#ifdef CONFIG_SCHED_SLI
	ca->alistats = alloc_percpu(struct cpuacct_alistats);
	if (!ca->alistats)
		goto out_free_cpustat;
#endif

	for_each_possible_cpu(i) {
		prev_cputime_init(&per_cpu_ptr(ca->cpuusage, i)->prev_cputime1);
		prev_cputime_init(&per_cpu_ptr(ca->cpuusage, i)->prev_cputime2);
	}

	return &ca->css;

#ifdef CONFIG_SCHED_SLI
out_free_cpustat:
	free_percpu(ca->cpustat);
#endif
out_free_cpuusage:
	free_percpu(ca->cpuusage);
out_free_ca:
	kfree(ca);
out:
	return ERR_PTR(-ENOMEM);
}

/* Destroy an existing CPU accounting group */
static void cpuacct_css_free(struct cgroup_subsys_state *css)
{
	struct cpuacct *ca = css_ca(css);

	free_percpu(ca->cpustat);
	free_percpu(ca->cpuusage);
#ifdef CONFIG_SCHED_SLI
	free_percpu(ca->alistats);
#endif
	kfree(ca);
}

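/*
 * Read one CPU's usage counter of a group; passing index == CPUACCT_STAT_NSTATS
 * returns the sum of all counters.
 */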
static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
				 enum cpuacct_stat_index index)
{
	struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
	u64 data;

	/*
	 * We allow index == CPUACCT_STAT_NSTATS here to read
	 * the sum of usages.
	 */
	BUG_ON(index > CPUACCT_STAT_NSTATS);

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit read safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
#endif

	if (index == CPUACCT_STAT_NSTATS) {
		int i = 0;

		data = 0;
		for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
			data += cpuusage->usages[i];
	} else {
		data = cpuusage->usages[index];
	}

#ifndef CONFIG_64BIT
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#endif

	return data;
}

/* Write @val into every usage counter of @cpu; cpuusage_write() uses this to reset usage */
static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
{
	struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
	int i;

#ifndef CONFIG_64BIT
	/*
	 * Take rq->lock to make 64-bit write safe on 32-bit platforms.
	 */
	raw_spin_lock_irq(&cpu_rq(cpu)->lock);
#endif

	for (i = 0; i < CPUACCT_STAT_NSTATS; i++)
		cpuusage->usages[i] = val;

#ifndef CONFIG_64BIT
	raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#endif
}

/* Return total CPU usage (in nanoseconds) of a group */
static u64 __cpuusage_read(struct cgroup_subsys_state *css,
			   enum cpuacct_stat_index index)
{
	struct cpuacct *ca = css_ca(css);
	u64 totalcpuusage = 0;
	int i;

	for_each_possible_cpu(i)
		totalcpuusage += cpuacct_cpuusage_read(ca, i, index);

	return totalcpuusage;
}

static u64 cpuusage_user_read(struct cgroup_subsys_state *css,
			      struct cftype *cft)
{
	return __cpuusage_read(css, CPUACCT_STAT_USER);
}

static u64 cpuusage_sys_read(struct cgroup_subsys_state *css,
			     struct cftype *cft)
{
	return __cpuusage_read(css, CPUACCT_STAT_SYSTEM);
}

static u64 cpuusage_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return __cpuusage_read(css, CPUACCT_STAT_NSTATS);
}

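/* Reset a group's accumulated usage on every CPU; only '0' may be written */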
static int cpuusage_write(struct cgroup_subsys_state *css, struct cftype *cft,
			  u64 val)
{
	struct cpuacct *ca = css_ca(css);
	int cpu;

	/*
	 * Only allow '0' here to do a reset.
	 */
	if (val)
		return -EINVAL;

	for_each_possible_cpu(cpu)
		cpuacct_cpuusage_write(ca, cpu, 0);

	return 0;
}

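/* Print the selected usage counter for every possible CPU on a single line */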
static int __cpuacct_percpu_seq_show(struct seq_file *m,
				     enum cpuacct_stat_index index)
{
	struct cpuacct *ca = css_ca(seq_css(m));
	u64 percpu;
	int i;

	for_each_possible_cpu(i) {
		percpu = cpuacct_cpuusage_read(ca, i, index);
		seq_printf(m, "%llu ", (unsigned long long) percpu);
	}
	seq_printf(m, "\n");
	return 0;
}

static int cpuacct_percpu_user_seq_show(struct seq_file *m, void *V)
{
	return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_USER);
}

static int cpuacct_percpu_sys_seq_show(struct seq_file *m, void *V)
{
	return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_SYSTEM);
}

static int cpuacct_percpu_seq_show(struct seq_file *m, void *V)
{
	return __cpuacct_percpu_seq_show(m, CPUACCT_STAT_NSTATS);
}

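/* Print a per-CPU table of all usage counters: a header line, then one row per possible CPU */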
static int cpuacct_all_seq_show(struct seq_file *m, void *V)
{
	struct cpuacct *ca = css_ca(seq_css(m));
	int index;
	int cpu;

	seq_puts(m, "cpu");
	for (index = 0; index < CPUACCT_STAT_NSTATS; index++)
		seq_printf(m, " %s", cpuacct_stat_desc[index]);
	seq_puts(m, "\n");

	for_each_possible_cpu(cpu) {
		struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);

		seq_printf(m, "%d", cpu);

		for (index = 0; index < CPUACCT_STAT_NSTATS; index++) {
#ifndef CONFIG_64BIT
			/*
			 * Take rq->lock to make 64-bit read safe on 32-bit
			 * platforms.
			 */
			raw_spin_lock_irq(&cpu_rq(cpu)->lock);
#endif

			seq_printf(m, " %llu", cpuusage->usages[index]);

#ifndef CONFIG_64BIT
			raw_spin_unlock_irq(&cpu_rq(cpu)->lock);
#endif
		}
		seq_puts(m, "\n");
	}
	return 0;
}

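/*
 * Show the group's cumulative user/system time in clock ticks; nice time is
 * folded into user, irq/softirq time into system.
 */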
static int cpuacct_stats_show(struct seq_file *sf, void *v)
{
	struct cpuacct *ca = css_ca(seq_css(sf));
	s64 val[CPUACCT_STAT_NSTATS];
	int cpu;
	int stat;

	memset(val, 0, sizeof(val));
	for_each_possible_cpu(cpu) {
		u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;

		val[CPUACCT_STAT_USER]   += cpustat[CPUTIME_USER];
		val[CPUACCT_STAT_USER]   += cpustat[CPUTIME_NICE];
		val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_SYSTEM];
		val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_IRQ];
		val[CPUACCT_STAT_SYSTEM] += cpustat[CPUTIME_SOFTIRQ];
	}

	for (stat = 0; stat < CPUACCT_STAT_NSTATS; stat++) {
		seq_printf(sf, "%s %lld\n",
			   cpuacct_stat_desc[stat],
			   (long long)nsec_to_clock_t(val[stat]));
	}

	return 0;
}

#ifdef CONFIG_SCHED_SLI
#ifndef arch_idle_time
#define arch_idle_time(cpu) 0
#endif

/* Return the task_group bound to @cgrp through the cpu controller's css */
static inline struct task_group *cgroup_tg(struct cgroup *cgrp)
{
	return container_of(global_cgroup_css(cgrp, cpu_cgrp_id),
				struct task_group, css);
}

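/* System-wide number of uninterruptible tasks, summed over all possible CPUs */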
static inline unsigned long nr_uninterruptible(void)
{
	unsigned long i, sum = 0;

	for_each_possible_cpu(i)
		sum += cpu_rq(i)->nr_uninterruptible;

	/*
	 * Since we read the counters lockless, it might be slightly
	 * inaccurate. Do not allow it to go below zero though:
	 */
	if (unlikely((long)sum < 0))
		sum = 0;

	return sum;
}

#ifdef CONFIG_CFS_BANDWIDTH
static inline bool tg_cfs_throttled(struct task_group *tg, int cpu)
{
	return tg->cfs_rq[cpu]->throttle_count;
}
#else
static inline bool tg_cfs_throttled(struct task_group *tg, int cpu)
{
	return false;
}
#endif

#ifdef CONFIG_RT_GROUP_SCHED
static inline bool tg_rt_throttled(struct task_group *tg, int cpu)
{
	return tg->rt_rq[cpu]->rt_throttled && !tg->rt_rq[cpu]->rt_nr_boosted;
}
#endif

/* Runnable tasks of this cpuacct group on @cpu; throttled cfs/rt runqueues contribute nothing */
static unsigned long ca_running(struct cpuacct *ca, int cpu)
{
	unsigned long nr_running = 0;
	struct cgroup *cgrp = ca->css.cgroup;
	struct task_group *tg;

	/* Make sure it is only called for non-root cpuacct */
	if (ca == &root_cpuacct)
		return 0;

	rcu_read_lock();
	tg = cgroup_tg(cgrp);
	if (unlikely(!tg))
		goto out;

	if (!tg_cfs_throttled(tg, cpu))
		nr_running += tg->cfs_rq[cpu]->h_nr_running;
#ifdef CONFIG_RT_GROUP_SCHED
	if (!tg_rt_throttled(tg, cpu))
		nr_running += tg->rt_rq[cpu]->rt_nr_running;
#endif
	/* SCHED_DEADLINE doesn't support cgroup yet */

out:
	rcu_read_unlock();
	return nr_running;
}

/* Uninterruptible tasks of this cpuacct group on @cpu */
static unsigned long ca_uninterruptible(struct cpuacct *ca, int cpu)
{
	unsigned long nr = 0;
	struct cgroup *cgrp = ca->css.cgroup;
	struct task_group *tg;

	/* Make sure it is only called for non-root cpuacct */
	if (ca == &root_cpuacct)
		return nr;

	rcu_read_lock();
	tg = cgroup_tg(cgrp);
	if (unlikely(!tg))
		goto out_rcu_unlock;

	nr = tg->cfs_rq[cpu]->nr_uninterruptible;
#ifdef CONFIG_RT_GROUP_SCHED
	nr += tg->rt_rq[cpu]->nr_uninterruptible;
#endif

out_rcu_unlock:
	rcu_read_unlock();
	return nr;
}

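/*
 * Break down this group's time on @cpu into /proc/stat style fields:
 * cputime_adjust() is applied twice, first to split the accumulated runtime
 * into (user + nice) and system time, then to split the former into user and
 * nice; irq, softirq and guest come from cpustat, steal from the sched
 * entity's wait_sum.
 */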
static void __cpuacct_get_usage_result(struct cpuacct *ca, int cpu,
		struct task_group *tg, struct cpuacct_usage_result *res)
{
	struct kernel_cpustat *kcpustat;
	struct cpuacct_usage *cpuusage;
	struct task_cputime cputime;
	u64 tick_user, tick_nice, tick_sys, left, right;
	struct sched_entity *se;

	kcpustat = per_cpu_ptr(ca->cpustat, cpu);
	if (unlikely(!tg)) {
		memset(res, 0, sizeof(*res));
		return;
	}

	se = tg->se[cpu];
	cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
	tick_user = kcpustat->cpustat[CPUTIME_USER];
	tick_nice = kcpustat->cpustat[CPUTIME_NICE];
	tick_sys = kcpustat->cpustat[CPUTIME_SYSTEM];

	/* Calculate system run time */
	cputime.sum_exec_runtime = cpuusage->usages[CPUACCT_STAT_USER] +
			cpuusage->usages[CPUACCT_STAT_SYSTEM];
	cputime.utime = tick_user + tick_nice;
	cputime.stime = tick_sys;
	cputime_adjust(&cputime, &cpuusage->prev_cputime1, &left, &right);
	res->system = right;

	/* Calculate user and nice run time */
	cputime.sum_exec_runtime = left; /* user + nice */
	cputime.utime = tick_user;
	cputime.stime = tick_nice;
	cputime_adjust(&cputime, &cpuusage->prev_cputime2, &left, &right);
	res->user = left;
	res->nice = right;

	res->irq = kcpustat->cpustat[CPUTIME_IRQ];
	res->softirq = kcpustat->cpustat[CPUTIME_SOFTIRQ];
	if (se)
		res->steal = se->statistics.wait_sum;
	else
		res->steal = 0;
	res->guest = kcpustat->cpustat[CPUTIME_GUEST];
	res->guest_nice = kcpustat->cpustat[CPUTIME_GUEST_NICE];
}

/*
 * Emit a /proc/stat style summary for the group: cumulative CPU times plus
 * nr_running, nr_uninterruptible and nr_migrations.
 */
static int cpuacct_proc_stats_show(struct seq_file *sf, void *v)
{
	struct cpuacct *ca = css_ca(seq_css(sf));
	struct cgroup *cgrp = seq_css(sf)->cgroup;
	u64 user, nice, system, idle, iowait, irq, softirq, steal, guest;
	u64 nr_migrations = 0;
	struct cpuacct_alistats *alistats;
	unsigned long nr_run = 0, nr_uninter = 0;
	int cpu;

	user = nice = system = idle = iowait =
		irq = softirq = steal = guest = 0;

	if (ca != &root_cpuacct) {
		struct cpuacct_usage_result res;

		for_each_possible_cpu(cpu) {
			if (!housekeeping_cpu(cpu, HK_FLAG_DOMAIN))
				continue;

			rcu_read_lock();
			__cpuacct_get_usage_result(ca, cpu,
					cgroup_tg(cgrp), &res);
			rcu_read_unlock();

			user += res.user;
			nice += res.nice;
			system += res.system;
			irq += res.irq;
			softirq += res.softirq;
			steal += res.steal;
			guest += res.guest;
			guest += res.guest_nice;
			iowait += res.iowait;
			idle += res.idle;

			alistats = per_cpu_ptr(ca->alistats, cpu);
			nr_migrations += alistats->nr_migrations;
			nr_run += ca_running(ca, cpu);
			nr_uninter += ca_uninterruptible(ca, cpu);
		}
	} else {
		struct kernel_cpustat *kcpustat;

		for_each_possible_cpu(cpu) {
			kcpustat = per_cpu_ptr(ca->cpustat, cpu);
			user += kcpustat->cpustat[CPUTIME_USER];
			nice += kcpustat->cpustat[CPUTIME_NICE];
			system += kcpustat->cpustat[CPUTIME_SYSTEM];
			irq += kcpustat->cpustat[CPUTIME_IRQ];
			softirq += kcpustat->cpustat[CPUTIME_SOFTIRQ];
			guest += kcpustat->cpustat[CPUTIME_GUEST];
			guest += kcpustat->cpustat[CPUTIME_GUEST_NICE];
			idle += get_idle_time(cpu);
			iowait += get_iowait_time(cpu);
			steal += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
			alistats = per_cpu_ptr(ca->alistats, cpu);
			nr_migrations += alistats->nr_migrations;
		}

		nr_run = nr_running();
		nr_uninter = nr_uninterruptible();
	}

	seq_printf(sf, "user %lld\n", nsec_to_clock_t(user));
	seq_printf(sf, "nice %lld\n", nsec_to_clock_t(nice));
	seq_printf(sf, "system %lld\n", nsec_to_clock_t(system));
	seq_printf(sf, "idle %lld\n", nsec_to_clock_t(idle));
	seq_printf(sf, "iowait %lld\n", nsec_to_clock_t(iowait));
	seq_printf(sf, "irq %lld\n", nsec_to_clock_t(irq));
	seq_printf(sf, "softirq %lld\n", nsec_to_clock_t(softirq));
	seq_printf(sf, "steal %lld\n", nsec_to_clock_t(steal));
	seq_printf(sf, "guest %lld\n", nsec_to_clock_t(guest));

	seq_printf(sf, "nr_running %lld\n", (u64)nr_run);
	if ((long) nr_uninter < 0)
		nr_uninter = 0;
	seq_printf(sf, "nr_uninterruptible %lld\n", (u64)nr_uninter);
	seq_printf(sf, "nr_migrations %lld\n", (u64)nr_migrations);

	return 0;
}
#endif

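/*
 * Example usage from userspace (assuming the v1 cpuacct hierarchy is mounted
 * at /sys/fs/cgroup/cpuacct):
 *   cat /sys/fs/cgroup/cpuacct/<group>/cpuacct.usage        # total time in ns
 *   echo 0 > /sys/fs/cgroup/cpuacct/<group>/cpuacct.usage   # reset the counters
 */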
static struct cftype files[] = {
	{
		.name = "usage",
		.read_u64 = cpuusage_read,
		.write_u64 = cpuusage_write,
	},
	{
		.name = "usage_user",
		.read_u64 = cpuusage_user_read,
	},
	{
		.name = "usage_sys",
		.read_u64 = cpuusage_sys_read,
	},
	{
		.name = "usage_percpu",
		.seq_show = cpuacct_percpu_seq_show,
	},
	{
		.name = "usage_percpu_user",
		.seq_show = cpuacct_percpu_user_seq_show,
	},
	{
		.name = "usage_percpu_sys",
		.seq_show = cpuacct_percpu_sys_seq_show,
	},
	{
		.name = "usage_all",
		.seq_show = cpuacct_all_seq_show,
	},
	{
		.name = "stat",
		.seq_show = cpuacct_stats_show,
	},
#ifdef CONFIG_SCHED_SLI
	{
		.name = "proc_stat",
		.seq_show = cpuacct_proc_stats_show,
	},
#endif
	{ }	/* terminate */
};

/*
 * charge this task's execution time to its accounting group.
 *
 * called with rq->lock held.
 */
void cpuacct_charge(struct task_struct *tsk, u64 cputime)
{
	struct cpuacct *ca;
	int index = CPUACCT_STAT_SYSTEM;
	struct pt_regs *regs = task_pt_regs(tsk);

	if (regs && user_mode(regs))
		index = CPUACCT_STAT_USER;

	rcu_read_lock();

	for (ca = task_ca(tsk); ca; ca = parent_ca(ca))
		this_cpu_ptr(ca->cpuusage)->usages[index] += cputime;

	rcu_read_unlock();
}

/*
 * Add user/system time to cpuacct.
 *
 * Note: it's the caller that updates the account of the root cgroup.
 */
void cpuacct_account_field(struct task_struct *tsk, int index, u64 val)
{
	struct cpuacct *ca;

	rcu_read_lock();
	for (ca = task_ca(tsk); ca != &root_cpuacct; ca = parent_ca(ca))
		this_cpu_ptr(ca->cpustat)->cpustat[index] += val;
	rcu_read_unlock();
}

struct cgroup_subsys cpuacct_cgrp_subsys = {
	.css_alloc	= cpuacct_css_alloc,
	.css_free	= cpuacct_css_free,
	.legacy_cftypes	= files,
	.early_init	= true,
};

#ifdef CONFIG_PSI
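/*
 * PSI files are exposed under the cgroup v1 cpuacct hierarchy only when the
 * kernel is booted with "psi_v1" set to a true value; otherwise the v1
 * interface is kept off via the psi_v1_disabled static branch.
 */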

static bool psi_v1_enable;
static int __init setup_psi_v1(char *str)
{
	return kstrtobool(str, &psi_v1_enable) == 0;
}
__setup("psi_v1=", setup_psi_v1);

static int __init cgroup_v1_psi_init(void)
{
	if (!psi_v1_enable) {
		static_branch_enable(&psi_v1_disabled);
		return 0;
	}

	cgroup_add_legacy_cftypes(&cpuacct_cgrp_subsys, cgroup_v1_psi_files);
	return 0;
}

late_initcall_sync(cgroup_v1_psi_init);
#endif