#include <linux/export.h>
#include <linux/sched.h>
#include <linux/tsacct_kern.h>
#include <linux/kernel_stat.h>
#include <linux/static_key.h>
#include <linux/context_tracking.h>
#include "sched.h"
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#endif


#ifdef CONFIG_IRQ_TIME_ACCOUNTING

/*
 * There are no locks covering percpu hardirq/softirq time.
 * They are only modified in vtime_account, on the corresponding CPU,
 * with interrupts disabled. So, writes are safe.
 * They are read and saved off onto struct rq in update_rq_clock().
 * This may result in another CPU reading this CPU's irq time and
 * racing with irq/vtime_account on this CPU. We would either get the
 * old or the new value, with the side effect of accounting a slice of
 * irq time to the wrong task when an irq is in progress while we read
 * rq->clock. That is a worthy compromise in place of having locks on
 * each irq in account_system_time.
 */
DEFINE_PER_CPU(u64, cpu_hardirq_time);
DEFINE_PER_CPU(u64, cpu_softirq_time);

static DEFINE_PER_CPU(u64, irq_start_time);
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	sched_clock_irqtime = 0;
}
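
/*
 * Irqtime accounting requires a fast sched_clock() that can be read from
 * irq context. Architectures are expected to turn it on only once such a
 * clock is available; on x86, for example, the TSC code calls
 * enable_sched_clock_irqtime() once the TSC is deemed usable.
 */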

#ifndef CONFIG_64BIT
DEFINE_PER_CPU(seqcount_t, irq_time_seq);
#endif /* CONFIG_64BIT */
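
/*
 * The matching irq_time_write_begin()/irq_time_write_end() and
 * irq_time_read() helpers live in sched.h. On 64-bit kernels they are
 * (nearly) no-ops, since aligned 64-bit loads and stores are atomic. On
 * 32-bit kernels the reader retries around this seqcount, roughly (a
 * sketch of the sched.h helpers of this era, not a definition):
 *
 *	do {
 *		seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu));
 *		irq_time = per_cpu(cpu_softirq_time, cpu) +
 *			   per_cpu(cpu_hardirq_time, cpu);
 *	} while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq));
 */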

/*
 * Called before incrementing preempt_count on {soft,}irq_enter
 * and before decrementing preempt_count on {soft,}irq_exit.
 */
void irqtime_account_irq(struct task_struct *curr)
{
	s64 delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;

	cpu = smp_processor_id();
	delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time);
	__this_cpu_add(irq_start_time, delta);

	irq_time_write_begin();
	/*
	 * We do not account for softirq time from ksoftirqd here.
	 * We want to continue accounting softirq time to ksoftirqd thread
	 * in that case, so as not to confuse the scheduler with a special
	 * task that does not consume any time but still wants to run.
	 */
	if (hardirq_count())
		__this_cpu_add(cpu_hardirq_time, delta);
	else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
		__this_cpu_add(cpu_softirq_time, delta);

	irq_time_write_end();
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
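
/*
 * The two helpers below fold accumulated percpu irq time into kcpustat,
 * clamped to @maxtime. Only the delta against what cpustat already holds
 * is accounted, so nothing is double-counted. For illustration (made-up
 * numbers): if cpu_hardirq_time corresponds to 5ms, cpustat[CPUTIME_IRQ]
 * holds 3ms and maxtime is 1ms, then 1ms is accounted now and 1ms stays
 * pending for a later update.
 */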
static cputime_t irqtime_account_hi_update(cputime_t maxtime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	cputime_t irq_cputime;

	local_irq_save(flags);
	irq_cputime = nsecs_to_cputime64(this_cpu_read(cpu_hardirq_time)) -
		      cpustat[CPUTIME_IRQ];
	irq_cputime = min(irq_cputime, maxtime);
	cpustat[CPUTIME_IRQ] += irq_cputime;
	local_irq_restore(flags);
	return irq_cputime;
}

static cputime_t irqtime_account_si_update(cputime_t maxtime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	unsigned long flags;
	cputime_t softirq_cputime;

	local_irq_save(flags);
	softirq_cputime = nsecs_to_cputime64(this_cpu_read(cpu_softirq_time)) -
			  cpustat[CPUTIME_SOFTIRQ];
	softirq_cputime = min(softirq_cputime, maxtime);
	cpustat[CPUTIME_SOFTIRQ] += softirq_cputime;
	local_irq_restore(flags);
	return softirq_cputime;
}

#else /* CONFIG_IRQ_TIME_ACCOUNTING */

#define sched_clock_irqtime	(0)

static cputime_t irqtime_account_hi_update(cputime_t dummy)
{
	return 0;
}

static cputime_t irqtime_account_si_update(cputime_t dummy)
{
	return 0;
}

#endif /* !CONFIG_IRQ_TIME_ACCOUNTING */

static inline void task_group_account_field(struct task_struct *p, int index,
					    u64 tmp)
{
	/*
	 * Since all updates are sure to touch the root cgroup, we
	 * go ahead and touch it first. If the root cgroup
	 * is the only cgroup, then nothing else should be necessary.
	 */
	__this_cpu_add(kernel_cpustat.cpustat[index], tmp);

	cpuacct_account_field(p, index, tmp);
}

/*
 * Account user cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in user space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_user_time(struct task_struct *p, cputime_t cputime,
		       cputime_t cputime_scaled)
{
	int index;

	/* Add user time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);

	index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;

	/* Add user time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for user time used */
	acct_account_cputime(p);
}

/*
 * Account guest cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in virtual machine since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
static void account_guest_time(struct task_struct *p, cputime_t cputime,
			       cputime_t cputime_scaled)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	/* Add guest time to process. */
	p->utime += cputime;
	p->utimescaled += cputime_scaled;
	account_group_user_time(p, cputime);
	p->gtime += cputime;

	/* Add guest time to cpustat. */
	if (task_nice(p) > 0) {
		cpustat[CPUTIME_NICE] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
	} else {
		cpustat[CPUTIME_USER] += (__force u64) cputime;
		cpustat[CPUTIME_GUEST] += (__force u64) cputime;
	}
}
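
/*
 * Note that guest time lands in both the user/nice and guest/guest_nice
 * buckets above, i.e. CPUTIME_USER/CPUTIME_NICE already include guest
 * execution; naively summing every cpustat field would count it twice.
 */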

/*
 * Account system cpu time to a process and desired cpustat field
 * @p: the process that the cpu time gets accounted to
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 * @index: index of the cpustat field that has to be updated
 */
static inline
void __account_system_time(struct task_struct *p, cputime_t cputime,
			cputime_t cputime_scaled, int index)
{
	/* Add system time to process. */
	p->stime += cputime;
	p->stimescaled += cputime_scaled;
	account_group_system_time(p, cputime);

	/* Add system time to cpustat. */
	task_group_account_field(p, index, (__force u64) cputime);

	/* Account for system time used */
	acct_account_cputime(p);
}

/*
 * Account system cpu time to a process.
 * @p: the process that the cpu time gets accounted to
 * @hardirq_offset: the offset to subtract from hardirq_count()
 * @cputime: the cpu time spent in kernel space since the last update
 * @cputime_scaled: cputime scaled by cpu frequency
 */
void account_system_time(struct task_struct *p, int hardirq_offset,
			 cputime_t cputime, cputime_t cputime_scaled)
{
	int index;

	if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
		account_guest_time(p, cputime, cputime_scaled);
		return;
	}

	if (hardirq_count() - hardirq_offset)
		index = CPUTIME_IRQ;
	else if (in_serving_softirq())
		index = CPUTIME_SOFTIRQ;
	else
		index = CPUTIME_SYSTEM;

	__account_system_time(p, cputime, cputime_scaled, index);
}

/*
 * Account for involuntary wait time.
 * @cputime: the cpu time spent in involuntary wait
 */
void account_steal_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;

	cpustat[CPUTIME_STEAL] += (__force u64) cputime;
}

/*
 * Account for idle time.
 * @cputime: the cpu time spent in idle wait
 */
void account_idle_time(cputime_t cputime)
{
	u64 *cpustat = kcpustat_this_cpu->cpustat;
	struct rq *rq = this_rq();

	if (atomic_read(&rq->nr_iowait) > 0)
		cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
	else
		cpustat[CPUTIME_IDLE] += (__force u64) cputime;
}
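
/*
 * Idle time is charged to iowait whenever some task that last ran on this
 * CPU is currently blocked on I/O (rq->nr_iowait > 0), which is one reason
 * per-cpu iowait can only ever be an approximation.
 */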

/*
 * When a guest is interrupted for a longer amount of time, missed clock
 * ticks are not redelivered later. Due to that, this function may on
 * occasion account more time than the calling functions think elapsed.
 */
static __always_inline cputime_t steal_account_process_time(cputime_t maxtime)
{
#ifdef CONFIG_PARAVIRT
	if (static_key_false(&paravirt_steal_enabled)) {
		cputime_t steal_cputime;
		u64 steal;

		steal = paravirt_steal_clock(smp_processor_id());
		steal -= this_rq()->prev_steal_time;

		steal_cputime = min(nsecs_to_cputime(steal), maxtime);
		account_steal_time(steal_cputime);
		this_rq()->prev_steal_time += cputime_to_nsecs(steal_cputime);

		return steal_cputime;
	}
#endif
	return 0;
}

/*
 * Account how much elapsed time was spent in steal, irq, or softirq time.
 */
static inline cputime_t account_other_time(cputime_t max)
{
	cputime_t accounted;

	accounted = steal_account_process_time(max);

	if (accounted < max)
		accounted += irqtime_account_hi_update(max - accounted);

	if (accounted < max)
		accounted += irqtime_account_si_update(max - accounted);

	return accounted;
}
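
/*
 * For example (illustrative numbers), with a budget of one tick: if steal
 * time alone consumes the whole budget, no pending hardirq/softirq time is
 * folded in on this call; it stays in the percpu counters until a later
 * call arrives with fresh budget. Steal is deliberately drained first,
 * then hardirq, then softirq.
 */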

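/*
 * On 64-bit, sum_exec_runtime can be read without locking: an aligned
 * 64-bit load cannot be torn. On 32-bit it is updated as two halves, so we
 * take the task's rq lock to get a consistent snapshot.
 */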
#ifdef CONFIG_64BIT
static inline u64 read_sum_exec_runtime(struct task_struct *t)
{
	return t->se.sum_exec_runtime;
}
#else
static u64 read_sum_exec_runtime(struct task_struct *t)
{
	u64 ns;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(t, &rf);
	ns = t->se.sum_exec_runtime;
	task_rq_unlock(rq, t, &rf);

	return ns;
}
#endif

/*
 * Accumulate raw cputime values of dead tasks (sig->[us]time) and live
 * tasks (sum on group iteration) belonging to @tsk's group.
 */
void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
{
	struct signal_struct *sig = tsk->signal;
	cputime_t utime, stime;
	struct task_struct *t;
	unsigned int seq, nextseq;
	unsigned long flags;

	/*
	 * Update current task runtime to account pending time since last
	 * scheduler action or thread_group_cputime() call. This thread group
	 * might have other running tasks on different CPUs, but updating
	 * their runtime can affect syscall performance, so we skip accounting
	 * those pending times and rely only on values updated on tick or
	 * other scheduler action.
	 */
	if (same_thread_group(current, tsk))
		(void) task_sched_runtime(current);

	rcu_read_lock();
	/* Attempt a lockless read on the first round. */
	nextseq = 0;
	do {
		seq = nextseq;
		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
		times->utime = sig->utime;
		times->stime = sig->stime;
		times->sum_exec_runtime = sig->sum_sched_runtime;

		for_each_thread(tsk, t) {
			task_cputime(t, &utime, &stime);
			times->utime += utime;
			times->stime += stime;
			times->sum_exec_runtime += read_sum_exec_runtime(t);
		}
		/* If lockless access failed, take the lock. */
		nextseq = 1;
	} while (need_seqretry(&sig->stats_lock, seq));
	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
	rcu_read_unlock();
}
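
/*
 * The lockless first pass above works because writers of sig->utime,
 * sig->stime and sig->sum_sched_runtime (e.g. __exit_signal(), which folds
 * a dying thread's counters into the group totals) update them under
 * write_seqlock(&sig->stats_lock), so a torn read is caught and retried.
 */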

#ifdef CONFIG_IRQ_TIME_ACCOUNTING
/*
 * Account a tick to a process and cpustat
 * @p: the process that the cpu time gets accounted to
 * @user_tick: is the tick from userspace
 * @rq: the pointer to rq
 *
 * Tick demultiplexing follows the order
 * - pending hardirq update
 * - pending softirq update
 * - user_time
 * - idle_time
 * - system time
 *   - check for guest_time
 *   - else account as system_time
 *
 * The check for hardirq is done for both system and user time, as there is
 * no timer going off while we are on hardirq and hence we may never get an
 * opportunity to update it solely in system time.
 * p->stime and friends are only updated on system time and not on irq or
 * softirq time, as those no longer count in the task's exec_runtime.
 */
static void irqtime_account_process_tick(struct task_struct *p, int user_tick,
					 struct rq *rq, int ticks)
{
	u64 cputime = (__force u64) cputime_one_jiffy * ticks;
	cputime_t scaled, other;

	/*
	 * When returning from idle, many ticks can get accounted at
	 * once, including some ticks of steal, irq, and softirq time.
	 * Subtract those ticks from the amount of time accounted to
	 * idle, or potentially user or system time. Due to rounding,
	 * other time can exceed ticks occasionally.
	 */
	other = account_other_time(ULONG_MAX);
	if (other >= cputime)
		return;
	cputime -= other;
	scaled = cputime_to_scaled(cputime);

	if (this_cpu_ksoftirqd() == p) {
		/*
		 * ksoftirqd time does not get accounted in cpu_softirq_time.
		 * So, we have to handle it separately here.
		 * Also, p->stime needs to be updated for ksoftirqd.
		 */
		__account_system_time(p, cputime, scaled, CPUTIME_SOFTIRQ);
	} else if (user_tick) {
		account_user_time(p, cputime, scaled);
	} else if (p == rq->idle) {
		account_idle_time(cputime);
	} else if (p->flags & PF_VCPU) { /* System time or guest time */
		account_guest_time(p, cputime, scaled);
	} else {
		__account_system_time(p, cputime, scaled, CPUTIME_SYSTEM);
	}
}

static void irqtime_account_idle_ticks(int ticks)
{
	struct rq *rq = this_rq();

	irqtime_account_process_tick(current, 0, rq, ticks);
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
static inline void irqtime_account_idle_ticks(int ticks) {}
static inline void irqtime_account_process_tick(struct task_struct *p, int user_tick,
						struct rq *rq, int nr_ticks) {}
#endif /* CONFIG_IRQ_TIME_ACCOUNTING */

/*
 * Use precise platform statistics if available:
 */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING

#ifndef __ARCH_HAS_VTIME_TASK_SWITCH
void vtime_common_task_switch(struct task_struct *prev)
{
	if (is_idle_task(prev))
		vtime_account_idle(prev);
	else
		vtime_account_system(prev);

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	vtime_account_user(prev);
#endif
	arch_vtime_task_switch(prev);
}
#endif

#endif /* CONFIG_VIRT_CPU_ACCOUNTING */


#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/*
 * Archs that account the whole time spent in the idle task
 * (outside irq) as idle time can rely on this and just implement
 * vtime_account_system() and vtime_account_idle(). Archs that
 * have another meaning of the idle time (s390 only includes the
 * time spent by the CPU when it's in low power mode) must override
 * vtime_account().
 */
#ifndef __ARCH_HAS_VTIME_ACCOUNT
void vtime_account_irq_enter(struct task_struct *tsk)
{
	if (!in_interrupt() && is_idle_task(tsk))
		vtime_account_idle(tsk);
	else
		vtime_account_system(tsk);
}
EXPORT_SYMBOL_GPL(vtime_account_irq_enter);
#endif /* __ARCH_HAS_VTIME_ACCOUNT */

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	*ut = p->utime;
	*st = p->stime;
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);

	*ut = cputime.utime;
	*st = cputime.stime;
}
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
/*
 * Account a single tick of cpu time.
 * @p: the process that the cpu time gets accounted to
 * @user_tick: indicates if the tick is a user or a system tick
 */
void account_process_tick(struct task_struct *p, int user_tick)
{
	cputime_t cputime, scaled, steal;
	struct rq *rq = this_rq();

	if (vtime_accounting_cpu_enabled())
		return;

	if (sched_clock_irqtime) {
		irqtime_account_process_tick(p, user_tick, rq, 1);
		return;
	}

	cputime = cputime_one_jiffy;
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;
	scaled = cputime_to_scaled(cputime);

	if (user_tick)
		account_user_time(p, cputime, scaled);
	else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET))
		account_system_time(p, HARDIRQ_OFFSET, cputime, scaled);
	else
		account_idle_time(cputime);
}
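
/*
 * Note on the system-vs-idle branch above: the tick itself contributes
 * HARDIRQ_OFFSET to irq_count(), so irq_count() != HARDIRQ_OFFSET means the
 * tick interrupted a nested hardirq or a softirq. In that case even the
 * idle task is charged system time rather than idle time.
 */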

/*
 * Account multiple ticks of idle time.
 * @ticks: number of ticks to account as idle
 */
void account_idle_ticks(unsigned long ticks)
{
	cputime_t cputime, steal;

	if (sched_clock_irqtime) {
		irqtime_account_idle_ticks(ticks);
		return;
	}

	cputime = jiffies_to_cputime(ticks);
	steal = steal_account_process_time(ULONG_MAX);

	if (steal >= cputime)
		return;

	cputime -= steal;
	account_idle_time(cputime);
}

/*
 * Perform (stime * rtime) / total, but avoid multiplication overflow by
 * losing precision when the numbers are big.
 */
static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
{
	u64 scaled;

	for (;;) {
		/* Make sure "rtime" is the bigger of stime/rtime */
		if (stime > rtime)
			swap(rtime, stime);

		/* Make sure 'total' fits in 32 bits */
		if (total >> 32)
			goto drop_precision;

		/* Does rtime (and thus stime) fit in 32 bits? */
		if (!(rtime >> 32))
			break;

		/* Can we just balance rtime/stime rather than dropping bits? */
		if (stime >> 31)
			goto drop_precision;

		/* We can grow stime and shrink rtime and try to make them both fit */
		stime <<= 1;
		rtime >>= 1;
		continue;

drop_precision:
		/* We drop from rtime, it has more bits than stime */
		rtime >>= 1;
		total >>= 1;
	}

	/*
	 * Make sure gcc understands that this is a 32x32->64 multiply,
	 * followed by a 64/32->64 divide.
	 */
	scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total);
	return (__force cputime_t) scaled;
}
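
/*
 * Worked example with illustrative numbers: the ticks recorded stime = 3
 * and utime = 6, but the scheduler says rtime = 12 actually ran.
 * scale_stime(3, 12, 9) returns 3 * 12 / 9 = 4, so cputime_adjust() below
 * reports 4 units of system time and rtime - stime = 8 units of user time,
 * preserving the observed 1:2 stime:utime ratio while summing to rtime.
 */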

/*
 * Adjust tick based cputime random precision against scheduler runtime
 * accounting.
 *
 * Tick based cputime accounting depends on whether the random scheduling
 * timeslices of a task are interrupted by the timer or not. Depending on
 * these circumstances, the number of these interrupts may be over- or
 * under-estimated, matching the real user and system cputime with a
 * variable precision.
 *
 * Fix this by scaling these tick based values against the total runtime
 * accounted by the CFS scheduler.
 *
 * This code provides the following guarantees:
 *
 *   stime + utime == rtime
 *   stime_i+1 >= stime_i, utime_i+1 >= utime_i
 *
 * Assuming that rtime_i+1 >= rtime_i.
 */
static void cputime_adjust(struct task_cputime *curr,
			   struct prev_cputime *prev,
			   cputime_t *ut, cputime_t *st)
{
	cputime_t rtime, stime, utime;
	unsigned long flags;

	/* Serialize concurrent callers such that we can honour our guarantees */
	raw_spin_lock_irqsave(&prev->lock, flags);
	rtime = nsecs_to_cputime(curr->sum_exec_runtime);

	/*
	 * This is possible under two circumstances:
	 *  - rtime isn't monotonic after all (a bug);
	 *  - we got reordered by the lock.
	 *
	 * In both cases this acts as a filter such that the rest of the code
	 * can assume it is monotonic regardless of anything else.
	 */
	if (prev->stime + prev->utime >= rtime)
		goto out;

	stime = curr->stime;
	utime = curr->utime;

	/*
	 * If either stime or both stime and utime are 0, assume all runtime is
	 * userspace. Once a task gets some ticks, the monotonicity code at
	 * 'update' will ensure things converge to the observed ratio.
	 */
	if (stime == 0) {
		utime = rtime;
		goto update;
	}

	if (utime == 0) {
		stime = rtime;
		goto update;
	}

	stime = scale_stime((__force u64)stime, (__force u64)rtime,
			    (__force u64)(stime + utime));

update:
	/*
	 * Make sure stime doesn't go backwards; this preserves monotonicity
	 * for utime because rtime is monotonic.
	 *
	 *  utime_i+1 = rtime_i+1 - stime_i
	 *            = rtime_i+1 - (rtime_i - utime_i)
	 *            = (rtime_i+1 - rtime_i) + utime_i
	 *            >= utime_i
	 */
	if (stime < prev->stime)
		stime = prev->stime;
	utime = rtime - stime;

	/*
	 * Make sure utime doesn't go backwards; this still preserves
	 * monotonicity for stime, analogous argument to above.
	 */
	if (utime < prev->utime) {
		utime = prev->utime;
		stime = rtime - utime;
	}

	prev->stime = stime;
	prev->utime = utime;
out:
	*ut = prev->utime;
	*st = prev->stime;
	raw_spin_unlock_irqrestore(&prev->lock, flags);
}

void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime = {
		.sum_exec_runtime = p->se.sum_exec_runtime,
	};

	task_cputime(p, &cputime.utime, &cputime.stime);
	cputime_adjust(&cputime, &p->prev_cputime, ut, st);
}
EXPORT_SYMBOL_GPL(task_cputime_adjusted);

void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	struct task_cputime cputime;

	thread_group_cputime(p, &cputime);
	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */

#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
static cputime_t vtime_delta(struct task_struct *tsk)
{
	unsigned long now = READ_ONCE(jiffies);

	if (time_before(now, (unsigned long)tsk->vtime_snap))
		return 0;

	return jiffies_to_cputime(now - tsk->vtime_snap);
}

static cputime_t get_vtime_delta(struct task_struct *tsk)
{
	unsigned long now = READ_ONCE(jiffies);
	cputime_t delta, other;

	/*
	 * Unlike tick based timing, vtime based timing never has lost
	 * ticks, so there is no need for steal time accounting to make up
	 * for lost ticks. Vtime accounts a rounded version of actual
	 * elapsed time. Limit account_other_time to prevent rounding
	 * errors from causing elapsed vtime to go negative.
	 */
	delta = jiffies_to_cputime(now - tsk->vtime_snap);
	other = account_other_time(delta);
	WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE);
	tsk->vtime_snap = now;

	return delta - other;
}
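
/*
 * account_other_time() never returns more than the limit it is passed, so
 * delta - other above cannot go negative: steal/irq/softirq time eats into
 * the vtime slice instead of being added on top of it.
 */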

static void __vtime_account_system(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_system_time(tsk, irq_count(), delta_cpu, cputime_to_scaled(delta_cpu));
}

void vtime_account_system(struct task_struct *tsk)
{
	if (!vtime_delta(tsk))
		return;

	write_seqcount_begin(&tsk->vtime_seqcount);
	__vtime_account_system(tsk);
	write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_account_user(struct task_struct *tsk)
{
	cputime_t delta_cpu;

	write_seqcount_begin(&tsk->vtime_seqcount);
	tsk->vtime_snap_whence = VTIME_SYS;
	if (vtime_delta(tsk)) {
		delta_cpu = get_vtime_delta(tsk);
		account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu));
	}
	write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_user_enter(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	if (vtime_delta(tsk))
		__vtime_account_system(tsk);
	tsk->vtime_snap_whence = VTIME_USER;
	write_seqcount_end(&tsk->vtime_seqcount);
}

void vtime_guest_enter(struct task_struct *tsk)
{
	/*
	 * The flags must be updated under the lock, together with
	 * the vtime_snap flush and update.
	 * That enforces the right ordering and update-sequence
	 * synchronization against the reader (task_gtime()),
	 * which can thus safely catch up with a tickless delta.
	 */
	write_seqcount_begin(&tsk->vtime_seqcount);
	if (vtime_delta(tsk))
		__vtime_account_system(tsk);
	current->flags |= PF_VCPU;
	write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_enter);

void vtime_guest_exit(struct task_struct *tsk)
{
	write_seqcount_begin(&tsk->vtime_seqcount);
	__vtime_account_system(tsk);
	current->flags &= ~PF_VCPU;
	write_seqcount_end(&tsk->vtime_seqcount);
}
EXPORT_SYMBOL_GPL(vtime_guest_exit);

void vtime_account_idle(struct task_struct *tsk)
{
	cputime_t delta_cpu = get_vtime_delta(tsk);

	account_idle_time(delta_cpu);
}

void arch_vtime_task_switch(struct task_struct *prev)
{
	write_seqcount_begin(&prev->vtime_seqcount);
	prev->vtime_snap_whence = VTIME_INACTIVE;
	write_seqcount_end(&prev->vtime_seqcount);

	write_seqcount_begin(&current->vtime_seqcount);
	current->vtime_snap_whence = VTIME_SYS;
	current->vtime_snap = jiffies;
	write_seqcount_end(&current->vtime_seqcount);
}

void vtime_init_idle(struct task_struct *t, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	write_seqcount_begin(&t->vtime_seqcount);
	t->vtime_snap_whence = VTIME_SYS;
	t->vtime_snap = jiffies;
	write_seqcount_end(&t->vtime_seqcount);
	local_irq_restore(flags);
}

cputime_t task_gtime(struct task_struct *t)
{
	unsigned int seq;
	cputime_t gtime;

	if (!vtime_accounting_enabled())
		return t->gtime;

	do {
		seq = read_seqcount_begin(&t->vtime_seqcount);

		gtime = t->gtime;
		if (t->vtime_snap_whence == VTIME_SYS && t->flags & PF_VCPU)
			gtime += vtime_delta(t);

	} while (read_seqcount_retry(&t->vtime_seqcount, seq));

	return gtime;
}

/*
 * Fetch cputime raw values from fields of task_struct and
 * add up the pending nohz execution time since the last
 * cputime snapshot.
 */
static void
fetch_task_cputime(struct task_struct *t,
		   cputime_t *u_dst, cputime_t *s_dst,
		   cputime_t *u_src, cputime_t *s_src,
		   cputime_t *udelta, cputime_t *sdelta)
{
	unsigned int seq;
	unsigned long long delta;

	do {
		*udelta = 0;
		*sdelta = 0;

		seq = read_seqcount_begin(&t->vtime_seqcount);

		if (u_dst)
			*u_dst = *u_src;
		if (s_dst)
			*s_dst = *s_src;

		/* Task is sleeping, nothing to add */
		if (t->vtime_snap_whence == VTIME_INACTIVE ||
		    is_idle_task(t))
			continue;

		delta = vtime_delta(t);

		/*
		 * Task runs either in user or kernel space, add pending nohz time to
		 * the right place.
		 */
		if (t->vtime_snap_whence == VTIME_USER || t->flags & PF_VCPU) {
			*udelta = delta;
		} else {
			if (t->vtime_snap_whence == VTIME_SYS)
				*sdelta = delta;
		}
	} while (read_seqcount_retry(&t->vtime_seqcount, seq));
}


void task_cputime(struct task_struct *t, cputime_t *utime, cputime_t *stime)
{
	cputime_t udelta, sdelta;

	if (!vtime_accounting_enabled()) {
		if (utime)
			*utime = t->utime;
		if (stime)
			*stime = t->stime;
		return;
	}

	fetch_task_cputime(t, utime, stime, &t->utime,
			   &t->stime, &udelta, &sdelta);
	if (utime)
		*utime += udelta;
	if (stime)
		*stime += sdelta;
}

void task_cputime_scaled(struct task_struct *t,
			 cputime_t *utimescaled, cputime_t *stimescaled)
{
	cputime_t udelta, sdelta;

	if (!vtime_accounting_enabled()) {
		if (utimescaled)
			*utimescaled = t->utimescaled;
		if (stimescaled)
			*stimescaled = t->stimescaled;
		return;
	}

	fetch_task_cputime(t, utimescaled, stimescaled,
			   &t->utimescaled, &t->stimescaled, &udelta, &sdelta);
	if (utimescaled)
		*utimescaled += cputime_to_scaled(udelta);
	if (stimescaled)
		*stimescaled += cputime_to_scaled(sdelta);
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */