commit ff303e66 authored by Peter Zijlstra, committed by Ingo Molnar

perf: Fix software migrate events

Stephane asked about PERF_COUNT_SW_CPU_MIGRATIONS and I realized it
was broken:

 > The problem is that the task isn't actually scheduled while it's being
 > migrated (obviously), and if it's not scheduled, the counters aren't
 > scheduled either, so there's no observing of the fact.
 >
 > A further problem with migrations is that many migrations happen from
 > softirq context, which is nested inside the 'random' task context of
 > whomever happens to run at that time, similarly for the wakeup
 > migrations triggered from (soft)irq context. All those end up being
 > accounted to the task that's currently running, e.g. your 'ls'.

The below cures this by marking a task as migrated and accounting it
on the subsequent sched_in().
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1836ac85
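Not part of the commit: to make the user-visible effect concrete, below is a minimal userspace sketch that counts PERF_COUNT_SW_CPU_MIGRATIONS for the calling task while bouncing it between two CPUs with sched_setaffinity(). It assumes a Linux box with at least two online CPUs; the constants and ioctls are the standard ones from perf_event_open(2), and most error handling is omitted for brevity. On a kernel without this fix, the reported count is unreliable in both directions (own migrations missed, other tasks' migrations charged in); with the fix, each forced move is charged to the task itself.

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

/* Thin wrapper: glibc provides no perf_event_open() stub. */
static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
			    int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count = 0;
	cpu_set_t set;
	int fd, i;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_CPU_MIGRATIONS;
	attr.disabled = 1;

	/* pid == 0, cpu == -1: count this task's migrations on any CPU. */
	fd = perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	for (i = 0; i < 10; i++) {
		/* Bounce between CPU 0 and CPU 1 to force migrations. */
		CPU_ZERO(&set);
		CPU_SET(i & 1, &set);
		sched_setaffinity(0, sizeof(set), &set);
	}
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	read(fd, &count, sizeof(count));
	printf("cpu-migrations: %llu\n", (unsigned long long)count);
	return 0;
}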
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -798,11 +798,33 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
 
 extern struct static_key_deferred perf_sched_events;
 
+static __always_inline bool
+perf_sw_migrate_enabled(void)
+{
+	if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
+		return true;
+	return false;
+}
+
+static inline void perf_event_task_migrate(struct task_struct *task)
+{
+	if (perf_sw_migrate_enabled())
+		task->sched_migrated = 1;
+}
+
 static inline void perf_event_task_sched_in(struct task_struct *prev,
 					    struct task_struct *task)
 {
 	if (static_key_false(&perf_sched_events.key))
 		__perf_event_task_sched_in(prev, task);
+
+	if (perf_sw_migrate_enabled() && task->sched_migrated) {
+		struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+		perf_fetch_caller_regs(regs);
+		___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
+		task->sched_migrated = 0;
+	}
 }
 
 static inline void perf_event_task_sched_out(struct task_struct *prev,
@@ -925,6 +947,8 @@ perf_aux_output_skip(struct perf_output_handle *handle,
 static inline void *
 perf_get_aux(struct perf_output_handle *handle)			{ return NULL; }
 static inline void
+perf_event_task_migrate(struct task_struct *task)		{ }
+static inline void
 perf_event_task_sched_in(struct task_struct *prev,
 			 struct task_struct *task)		{ }
 static inline void
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1356,9 +1356,6 @@ struct task_struct {
 #endif
 
 	struct mm_struct *mm, *active_mm;
-#ifdef CONFIG_COMPAT_BRK
-	unsigned brk_randomized:1;
-#endif
 	/* per-thread vma caching */
 	u32 vmacache_seqnum;
 	struct vm_area_struct *vmacache[VMACACHE_SIZE];
@@ -1381,10 +1378,14 @@ struct task_struct {
 	/* Revert to default priority/policy when forking */
 	unsigned sched_reset_on_fork:1;
 	unsigned sched_contributes_to_load:1;
+	unsigned sched_migrated:1;
 #ifdef CONFIG_MEMCG_KMEM
 	unsigned memcg_kmem_skip_account:1;
 #endif
+#ifdef CONFIG_COMPAT_BRK
+	unsigned brk_randomized:1;
+#endif
 
 	unsigned long atomic_flags; /* Flags needing atomic access. */
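(An observation about the hunk above, not stated in the commit message: the CONFIG_COMPAT_BRK brk_randomized bit is relocated next to the other per-task bitfields, presumably so that the new sched_migrated bit and the existing flags pack into the same storage word rather than growing task_struct.)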
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1049,7 +1049,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 		if (p->sched_class->migrate_task_rq)
 			p->sched_class->migrate_task_rq(p, new_cpu);
 		p->se.nr_migrations++;
-		perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
+		perf_event_task_migrate(p);
 	}
 
 	__set_task_cpu(p, new_cpu);
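Not part of the commit: on a kernel carrying this patch, the fix can be sanity-checked from the command line with the perf tool's software-event alias, e.g.

	perf stat -e cpu-migrations ls

Before the patch, migrations performed from (soft)irq context were charged to whichever task happened to be running, e.g. the 'ls' from the quote above; with the deferred accounting, a task is charged only for its own migrations, counted at its next sched_in().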