提交 38ca9c92 编写于 作者: Ingo Molnar

Merge tag 'cputime-cleanups-for-mingo' of...

Merge tag 'cputime-cleanups-for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into sched/core

Pull cputime cleanups and optimizations from Frederic Weisbecker:

 * Gather vtime headers that were a bit scattered around

 * Separate irqtime and vtime namespaces that were
   colliding, resulting in useless calls to irqtime accounting.

 * Slightly optimize irq and guest vtime accounting.
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
...@@ -106,9 +106,9 @@ void vtime_task_switch(struct task_struct *prev) ...@@ -106,9 +106,9 @@ void vtime_task_switch(struct task_struct *prev)
struct thread_info *ni = task_thread_info(current); struct thread_info *ni = task_thread_info(current);
if (idle_task(smp_processor_id()) != prev) if (idle_task(smp_processor_id()) != prev)
vtime_account_system(prev); __vtime_account_system(prev);
else else
vtime_account_idle(prev); __vtime_account_idle(prev);
vtime_account_user(prev); vtime_account_user(prev);
...@@ -135,14 +135,14 @@ static cputime_t vtime_delta(struct task_struct *tsk) ...@@ -135,14 +135,14 @@ static cputime_t vtime_delta(struct task_struct *tsk)
return delta_stime; return delta_stime;
} }
void vtime_account_system(struct task_struct *tsk) void __vtime_account_system(struct task_struct *tsk)
{ {
cputime_t delta = vtime_delta(tsk); cputime_t delta = vtime_delta(tsk);
account_system_time(tsk, 0, delta, delta); account_system_time(tsk, 0, delta, delta);
} }
void vtime_account_idle(struct task_struct *tsk) void __vtime_account_idle(struct task_struct *tsk)
{ {
account_idle_time(vtime_delta(tsk)); account_idle_time(vtime_delta(tsk));
} }
......
...@@ -336,7 +336,7 @@ static u64 vtime_delta(struct task_struct *tsk, ...@@ -336,7 +336,7 @@ static u64 vtime_delta(struct task_struct *tsk,
return delta; return delta;
} }
void vtime_account_system(struct task_struct *tsk) void __vtime_account_system(struct task_struct *tsk)
{ {
u64 delta, sys_scaled, stolen; u64 delta, sys_scaled, stolen;
...@@ -346,7 +346,7 @@ void vtime_account_system(struct task_struct *tsk) ...@@ -346,7 +346,7 @@ void vtime_account_system(struct task_struct *tsk)
account_steal_time(stolen); account_steal_time(stolen);
} }
void vtime_account_idle(struct task_struct *tsk) void __vtime_account_idle(struct task_struct *tsk)
{ {
u64 delta, sys_scaled, stolen; u64 delta, sys_scaled, stolen;
......
...@@ -140,6 +140,10 @@ void vtime_account(struct task_struct *tsk) ...@@ -140,6 +140,10 @@ void vtime_account(struct task_struct *tsk)
} }
EXPORT_SYMBOL_GPL(vtime_account); EXPORT_SYMBOL_GPL(vtime_account);
void __vtime_account_system(struct task_struct *tsk)
__attribute__((alias("vtime_account")));
EXPORT_SYMBOL_GPL(__vtime_account_system);
void __kprobes vtime_stop_cpu(void) void __kprobes vtime_stop_cpu(void)
{ {
struct s390_idle_data *idle = &__get_cpu_var(s390_idle); struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
......
...@@ -608,9 +608,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) ...@@ -608,9 +608,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
kvm_s390_deliver_pending_interrupts(vcpu); kvm_s390_deliver_pending_interrupts(vcpu);
vcpu->arch.sie_block->icptcode = 0; vcpu->arch.sie_block->icptcode = 0;
local_irq_disable();
kvm_guest_enter(); kvm_guest_enter();
local_irq_enable();
VCPU_EVENT(vcpu, 6, "entering sie flags %x", VCPU_EVENT(vcpu, 6, "entering sie flags %x",
atomic_read(&vcpu->arch.sie_block->cpuflags)); atomic_read(&vcpu->arch.sie_block->cpuflags));
trace_kvm_s390_sie_enter(vcpu, trace_kvm_s390_sie_enter(vcpu,
...@@ -629,9 +627,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) ...@@ -629,9 +627,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
vcpu->arch.sie_block->icptcode); vcpu->arch.sie_block->icptcode);
trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode); trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
local_irq_disable();
kvm_guest_exit(); kvm_guest_exit();
local_irq_enable();
memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16); memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
return rc; return rc;
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
#include <linux/preempt.h> #include <linux/preempt.h>
#include <linux/lockdep.h> #include <linux/lockdep.h>
#include <linux/ftrace_irq.h> #include <linux/ftrace_irq.h>
#include <linux/vtime.h>
#include <asm/hardirq.h> #include <asm/hardirq.h>
/* /*
...@@ -129,16 +130,6 @@ extern void synchronize_irq(unsigned int irq); ...@@ -129,16 +130,6 @@ extern void synchronize_irq(unsigned int irq);
# define synchronize_irq(irq) barrier() # define synchronize_irq(irq) barrier()
#endif #endif
struct task_struct;
#if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING)
static inline void vtime_account(struct task_struct *tsk)
{
}
#else
extern void vtime_account(struct task_struct *tsk);
#endif
#if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
static inline void rcu_nmi_enter(void) static inline void rcu_nmi_enter(void)
...@@ -162,7 +153,7 @@ extern void rcu_nmi_exit(void); ...@@ -162,7 +153,7 @@ extern void rcu_nmi_exit(void);
*/ */
#define __irq_enter() \ #define __irq_enter() \
do { \ do { \
vtime_account(current); \ vtime_account_irq_enter(current); \
add_preempt_count(HARDIRQ_OFFSET); \ add_preempt_count(HARDIRQ_OFFSET); \
trace_hardirq_enter(); \ trace_hardirq_enter(); \
} while (0) } while (0)
...@@ -178,7 +169,7 @@ extern void irq_enter(void); ...@@ -178,7 +169,7 @@ extern void irq_enter(void);
#define __irq_exit() \ #define __irq_exit() \
do { \ do { \
trace_hardirq_exit(); \ trace_hardirq_exit(); \
vtime_account(current); \ vtime_account_irq_exit(current); \
sub_preempt_count(HARDIRQ_OFFSET); \ sub_preempt_count(HARDIRQ_OFFSET); \
} while (0) } while (0)
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/vtime.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/cputime.h> #include <asm/cputime.h>
...@@ -130,12 +131,4 @@ extern void account_process_tick(struct task_struct *, int user); ...@@ -130,12 +131,4 @@ extern void account_process_tick(struct task_struct *, int user);
extern void account_steal_ticks(unsigned long ticks); extern void account_steal_ticks(unsigned long ticks);
extern void account_idle_ticks(unsigned long ticks); extern void account_idle_ticks(unsigned long ticks);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void vtime_task_switch(struct task_struct *prev);
extern void vtime_account_system(struct task_struct *tsk);
extern void vtime_account_idle(struct task_struct *tsk);
#else
static inline void vtime_task_switch(struct task_struct *prev) { }
#endif
#endif /* _LINUX_KERNEL_STAT_H */ #endif /* _LINUX_KERNEL_STAT_H */
...@@ -737,7 +737,11 @@ static inline int kvm_deassign_device(struct kvm *kvm, ...@@ -737,7 +737,11 @@ static inline int kvm_deassign_device(struct kvm *kvm,
static inline void kvm_guest_enter(void) static inline void kvm_guest_enter(void)
{ {
BUG_ON(preemptible()); BUG_ON(preemptible());
vtime_account(current); /*
* This is running in ioctl context so we can avoid
* the call to vtime_account() with its unnecessary idle check.
*/
vtime_account_system(current);
current->flags |= PF_VCPU; current->flags |= PF_VCPU;
/* KVM does not hold any references to rcu protected data when it /* KVM does not hold any references to rcu protected data when it
* switches CPU into a guest mode. In fact switching to a guest mode * switches CPU into a guest mode. In fact switching to a guest mode
...@@ -751,7 +755,11 @@ static inline void kvm_guest_enter(void) ...@@ -751,7 +755,11 @@ static inline void kvm_guest_enter(void)
static inline void kvm_guest_exit(void) static inline void kvm_guest_exit(void)
{ {
vtime_account(current); /*
* This is running in ioctl context so we can avoid
* the call to vtime_account() with its unnecessary idle check.
*/
vtime_account_system(current);
current->flags &= ~PF_VCPU; current->flags &= ~PF_VCPU;
} }
......
#ifndef _LINUX_KERNEL_VTIME_H
#define _LINUX_KERNEL_VTIME_H
struct task_struct;
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
extern void vtime_task_switch(struct task_struct *prev);
extern void __vtime_account_system(struct task_struct *tsk);
extern void vtime_account_system(struct task_struct *tsk);
extern void __vtime_account_idle(struct task_struct *tsk);
extern void vtime_account(struct task_struct *tsk);
#else
static inline void vtime_task_switch(struct task_struct *prev) { }
static inline void __vtime_account_system(struct task_struct *tsk) { }
static inline void vtime_account_system(struct task_struct *tsk) { }
static inline void vtime_account(struct task_struct *tsk) { }
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
extern void irqtime_account_irq(struct task_struct *tsk);
#else
static inline void irqtime_account_irq(struct task_struct *tsk) { }
#endif
static inline void vtime_account_irq_enter(struct task_struct *tsk)
{
/*
* Hardirq can interrupt idle task anytime. So we need vtime_account()
* that performs the idle check in CONFIG_VIRT_CPU_ACCOUNTING.
* Softirq can also interrupt idle task directly if it calls
* local_bh_enable(). Such case probably don't exist but we never know.
* Ksoftirqd is not concerned because idle time is flushed on context
* switch. Softirqs in the end of hardirqs are also not a problem because
* the idle time is flushed on hardirq time already.
*/
vtime_account(tsk);
irqtime_account_irq(tsk);
}
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
/* On hard|softirq exit we always account to hard|softirq cputime */
__vtime_account_system(tsk);
irqtime_account_irq(tsk);
}
#endif /* _LINUX_KERNEL_VTIME_H */
...@@ -43,7 +43,7 @@ DEFINE_PER_CPU(seqcount_t, irq_time_seq); ...@@ -43,7 +43,7 @@ DEFINE_PER_CPU(seqcount_t, irq_time_seq);
* Called before incrementing preempt_count on {soft,}irq_enter * Called before incrementing preempt_count on {soft,}irq_enter
* and before decrementing preempt_count on {soft,}irq_exit. * and before decrementing preempt_count on {soft,}irq_exit.
*/ */
void vtime_account(struct task_struct *curr) void irqtime_account_irq(struct task_struct *curr)
{ {
unsigned long flags; unsigned long flags;
s64 delta; s64 delta;
...@@ -73,7 +73,7 @@ void vtime_account(struct task_struct *curr) ...@@ -73,7 +73,7 @@ void vtime_account(struct task_struct *curr)
irq_time_write_end(); irq_time_write_end();
local_irq_restore(flags); local_irq_restore(flags);
} }
EXPORT_SYMBOL_GPL(vtime_account); EXPORT_SYMBOL_GPL(irqtime_account_irq);
static int irqtime_account_hi_update(void) static int irqtime_account_hi_update(void)
{ {
...@@ -433,10 +433,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st) ...@@ -433,10 +433,20 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
*st = cputime.stime; *st = cputime.stime;
} }
void vtime_account_system(struct task_struct *tsk)
{
unsigned long flags;
local_irq_save(flags);
__vtime_account_system(tsk);
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(vtime_account_system);
/* /*
* Archs that account the whole time spent in the idle task * Archs that account the whole time spent in the idle task
* (outside irq) as idle time can rely on this and just implement * (outside irq) as idle time can rely on this and just implement
* vtime_account_system() and vtime_account_idle(). Archs that * __vtime_account_system() and __vtime_account_idle(). Archs that
* have other meaning of the idle time (s390 only includes the * have other meaning of the idle time (s390 only includes the
* time spent by the CPU when it's in low power mode) must override * time spent by the CPU when it's in low power mode) must override
* vtime_account(). * vtime_account().
...@@ -449,9 +459,9 @@ void vtime_account(struct task_struct *tsk) ...@@ -449,9 +459,9 @@ void vtime_account(struct task_struct *tsk)
local_irq_save(flags); local_irq_save(flags);
if (in_interrupt() || !is_idle_task(tsk)) if (in_interrupt() || !is_idle_task(tsk))
vtime_account_system(tsk); __vtime_account_system(tsk);
else else
vtime_account_idle(tsk); __vtime_account_idle(tsk);
local_irq_restore(flags); local_irq_restore(flags);
} }
......
...@@ -221,7 +221,7 @@ asmlinkage void __do_softirq(void) ...@@ -221,7 +221,7 @@ asmlinkage void __do_softirq(void)
current->flags &= ~PF_MEMALLOC; current->flags &= ~PF_MEMALLOC;
pending = local_softirq_pending(); pending = local_softirq_pending();
vtime_account(current); vtime_account_irq_enter(current);
__local_bh_disable((unsigned long)__builtin_return_address(0), __local_bh_disable((unsigned long)__builtin_return_address(0),
SOFTIRQ_OFFSET); SOFTIRQ_OFFSET);
...@@ -272,7 +272,7 @@ asmlinkage void __do_softirq(void) ...@@ -272,7 +272,7 @@ asmlinkage void __do_softirq(void)
lockdep_softirq_exit(); lockdep_softirq_exit();
vtime_account(current); vtime_account_irq_exit(current);
__local_bh_enable(SOFTIRQ_OFFSET); __local_bh_enable(SOFTIRQ_OFFSET);
tsk_restore_flags(current, old_flags, PF_MEMALLOC); tsk_restore_flags(current, old_flags, PF_MEMALLOC);
} }
...@@ -341,7 +341,7 @@ static inline void invoke_softirq(void) ...@@ -341,7 +341,7 @@ static inline void invoke_softirq(void)
*/ */
void irq_exit(void) void irq_exit(void)
{ {
vtime_account(current); vtime_account_irq_exit(current);
trace_hardirq_exit(); trace_hardirq_exit();
sub_preempt_count(IRQ_EXIT_OFFSET); sub_preempt_count(IRQ_EXIT_OFFSET);
if (!in_interrupt() && local_softirq_pending()) if (!in_interrupt() && local_softirq_pending())
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册