Commit 347abad9 authored by Rik van Riel, committed by Ingo Molnar

sched, time: Fix build error with 64 bit cputime_t on 32 bit systems

On 32 bit systems cmpxchg cannot handle 64 bit values, so
some additional magic is required to allow a 32 bit system
with CONFIG_VIRT_CPU_ACCOUNTING_GEN=y enabled to build.

Make sure the correct cmpxchg function is used when doing
an atomic swap of a cputime_t.
Reported-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Rik van Riel <riel@redhat.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: umgwanakikbuti@gmail.com
Cc: fweisbec@gmail.com
Cc: srao@redhat.com
Cc: lwoodman@redhat.com
Cc: atheurer@redhat.com
Cc: oleg@redhat.com
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: linux390@de.ibm.com
Cc: linux-arch@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Cc: linux-s390@vger.kernel.org
Link: http://lkml.kernel.org/r/20140930155947.070cdb1f@annuminas.surriel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Parent 43f4d666
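As background for the build error described in the commit message, here is a minimal userspace C sketch of the same idea (not part of the commit): the compare-and-swap primitive has to match the width of cputime_t, so a 64-bit cputime_t on a 32-bit CPU needs a 64-bit CAS, which is what cmpxchg64() provides in the kernel. The cas_cputime() helper and the CPUTIME_NSECS switch below are illustrative names made up for this sketch, not kernel APIs; on 32-bit targets the 64-bit case may additionally need -latomic at link time.

```c
/*
 * Illustrative sketch only; not kernel code. cputime_t can be either the
 * native word size (jiffies based) or a fixed 64 bits (nanosecond based),
 * so the CAS helper has to be chosen to match -- on a 32-bit CPU a plain
 * word-sized cmpxchg cannot update a 64-bit value atomically.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#ifdef CPUTIME_NSECS			/* hypothetical switch for this demo */
typedef uint64_t cputime_t;		/* 64 bits even on 32-bit CPUs */
#else
typedef unsigned long cputime_t;	/* native word size */
#endif

/* Stand-in for the kernel's cmpxchg()/cmpxchg64() selection: the compiler
 * emits (or, on 32-bit targets, libatomic provides) a CAS as wide as the
 * type being operated on. */
static bool cas_cputime(cputime_t *ptr, cputime_t old, cputime_t new)
{
	return __atomic_compare_exchange_n(ptr, &old, new, false,
					   __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

int main(void)
{
	cputime_t t = 10;

	cas_cputime(&t, 10, 20);	/* succeeds: t was 10, becomes 20 */
	cas_cputime(&t, 10, 30);	/* fails: t is already 20 */
	printf("%llu\n", (unsigned long long)t);	/* prints 20 */
	return 0;
}
```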
arch/powerpc/include/asm/cputime.h
@@ -32,6 +32,8 @@ static inline void setup_cputime_one_jiffy(void) { }
 typedef u64 __nocast cputime_t;
 typedef u64 __nocast cputime64_t;
 
+#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
+
 #ifdef __KERNEL__
 
 /*
arch/s390/include/asm/cputime.h
@@ -18,6 +18,8 @@
 typedef unsigned long long __nocast cputime_t;
 typedef unsigned long long __nocast cputime64_t;
 
+#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
+
 static inline unsigned long __div(unsigned long long n, unsigned long base)
 {
 #ifndef CONFIG_64BIT
include/asm-generic/cputime_jiffies.h
@@ -3,6 +3,8 @@
 
 typedef unsigned long __nocast cputime_t;
 
+#define cmpxchg_cputime(ptr, old, new) cmpxchg(ptr, old, new)
+
 #define cputime_one_jiffy jiffies_to_cputime(1)
 #define cputime_to_jiffies(__ct) (__force unsigned long)(__ct)
 #define cputime_to_scaled(__ct) (__ct)
include/asm-generic/cputime_nsecs.h
@@ -21,6 +21,8 @@
 typedef u64 __nocast cputime_t;
 typedef u64 __nocast cputime64_t;
 
+#define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new)
+
 #define cputime_one_jiffy jiffies_to_cputime(1)
 
 #define cputime_div(__ct, divisor) div_u64((__force u64)__ct, divisor)
kernel/sched/cputime.c
@@ -554,6 +554,23 @@ static cputime_t scale_stime(u64 stime, u64 rtime, u64 total)
 	return (__force cputime_t) scaled;
 }
 
+/*
+ * Atomically advance counter to the new value. Interrupts, vcpu
+ * scheduling, and scaling inaccuracies can cause cputime_advance
+ * to be occasionally called with a new value smaller than counter.
+ * Let's enforce atomicity.
+ *
+ * Normally a caller will only go through this loop once, or not
+ * at all in case a previous caller updated counter the same jiffy.
+ */
+static void cputime_advance(cputime_t *counter, cputime_t new)
+{
+	cputime_t old;
+
+	while (new > (old = ACCESS_ONCE(*counter)))
+		cmpxchg_cputime(counter, old, new);
+}
+
 /*
  * Adjust tick based cputime random precision against scheduler
  * runtime accounting.
@@ -599,16 +616,8 @@ static void cputime_adjust(struct task_cputime *curr,
 		utime = rtime - stime;
 	}
 
-	/*
-	 * If the tick based count grows faster than the scheduler one,
-	 * the result of the scaling may go backward.
-	 * Let's enforce monotonicity.
-	 * Atomic exchange protects against concurrent cputime_adjust().
-	 */
-	while (stime > (rtime = ACCESS_ONCE(prev->stime)))
-		cmpxchg(&prev->stime, rtime, stime);
-	while (utime > (rtime = ACCESS_ONCE(prev->utime)))
-		cmpxchg(&prev->utime, rtime, utime);
+	cputime_advance(&prev->stime, stime);
+	cputime_advance(&prev->utime, utime);
 
 out:
 	*ut = prev->utime;
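For readers unfamiliar with the pattern, here is a self-contained userspace rendering of the cputime_advance() loop added above, using C11 atomics in place of the kernel's ACCESS_ONCE()/cmpxchg_cputime(). counter_advance() and the values in main() are illustrative only; this is a sketch of the technique, not the kernel implementation.

```c
/* Userspace sketch of the "never move backwards" CAS loop; not kernel code.
 * atomic_compare_exchange_weak() reloads 'old' on failure, so the
 * monotonicity check (new > old) is re-evaluated on every retry, mirroring
 * the re-read of the counter through ACCESS_ONCE() in the kernel version.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t counter;

static void counter_advance(uint64_t new)
{
	uint64_t old = atomic_load(&counter);

	while (new > old) {
		if (atomic_compare_exchange_weak(&counter, &old, new))
			break;	/* we installed the larger value */
		/* someone else updated counter; 'old' now holds their value */
	}
}

int main(void)
{
	counter_advance(100);
	counter_advance(90);	/* ignored: would move the counter backwards */
	counter_advance(150);
	printf("%llu\n", (unsigned long long)atomic_load(&counter)); /* 150 */
	return 0;
}
```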