Commit 3ccf3e83 authored by Peter Zijlstra, committed by Ingo Molnar

printk/sched: Introduce special printk_sched() for those awkward moments

There are a few awkward printk()s inside the scheduler guts that people
prefer to keep but which are really rather deadlock prone. Fudge around
this by storing the text in a per-cpu buffer and polling it from the
existing printk_tick() handler.

This will drop output when it is produced more often than once per tick;
however, only the affinity message could possibly fire that fast, and for
that a single message should suffice to notify the admin that he has done
something silly.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/n/tip-wua3lmkt3dg8nfts66o6brne@git.kernel.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Parent 554cecaf
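The scheme the changelog describes is small enough to sketch outside the kernel. The following user-space C program is a simplified, single-CPU model of it, offered only as an illustration: the printk_sched()/printk_tick() names and the buffer/pending-bit layout mirror the patch below, while the plain static variables, the stdio calls, and the main() harness are assumptions made for the sketch. Calling the deferred printk twice between ticks overwrites the buffer, which is exactly the "drop output when it is produced more often than once per tick" behaviour mentioned above.

/*
 * Simplified, single-CPU user-space model of the deferred printk scheme.
 * Illustrative sketch only: the real kernel code uses per-cpu data,
 * IRQ-safe accessors and the console log instead of statics and printf().
 */
#include <stdarg.h>
#include <stdio.h>

#define PRINTK_BUF_SIZE         512
#define PRINTK_PENDING_SCHED    0x02

static char printk_sched_buf[PRINTK_BUF_SIZE];  /* per-cpu buffer in the patch */
static int printk_pending;                      /* per-cpu pending bits in the patch */

/* Format into the buffer and mark it pending; no console work happens here. */
static int printk_sched(const char *fmt, ...)
{
        va_list args;
        int r;

        va_start(args, fmt);
        r = vsnprintf(printk_sched_buf, PRINTK_BUF_SIZE, fmt, args);
        va_end(args);

        printk_pending |= PRINTK_PENDING_SCHED;
        return r;
}

/* The per-tick poll: emit whatever is pending, at most one message per tick. */
static void printk_tick(void)
{
        int pending = printk_pending;

        printk_pending = 0;
        if (pending & PRINTK_PENDING_SCHED)
                printf("[sched_delayed] %s", printk_sched_buf);
}

int main(void)
{
        printk_sched("process %d (%s) no longer affine to cpu%d\n", 42, "demo", 1);
        printk_sched("process %d (%s) no longer affine to cpu%d\n", 43, "demo", 2);

        /* Only the second message survives: the first was overwritten before the tick. */
        printk_tick();
        return 0;
}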
@@ -100,6 +100,11 @@ int vprintk(const char *fmt, va_list args);
 asmlinkage __printf(1, 2) __cold
 int printk(const char *fmt, ...);
 
+/*
+ * Special printk facility for scheduler use only, _DO_NOT_USE_ !
+ */
+__printf(1, 2) __cold int printk_sched(const char *fmt, ...);
+
 /*
  * Please don't use printk_ratelimit(), because it shares ratelimiting state
  * with all other unrelated printk_ratelimit() callsites. Instead use
@@ -127,6 +132,11 @@ int printk(const char *s, ...)
 {
         return 0;
 }
+static inline __printf(1, 2) __cold
+int printk_sched(const char *s, ...)
+{
+        return 0;
+}
 static inline int printk_ratelimit(void)
 {
         return 0;
@@ -1208,13 +1208,47 @@ int is_console_locked(void)
         return console_locked;
 }
 
+/*
+ * Delayed printk facility, for scheduler-internal messages:
+ */
+#define PRINTK_BUF_SIZE 512
+
+#define PRINTK_PENDING_WAKEUP 0x01
+#define PRINTK_PENDING_SCHED 0x02
+
 static DEFINE_PER_CPU(int, printk_pending);
+static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
+
+int printk_sched(const char *fmt, ...)
+{
+        unsigned long flags;
+        va_list args;
+        char *buf;
+        int r;
+
+        local_irq_save(flags);
+        buf = __get_cpu_var(printk_sched_buf);
+
+        va_start(args, fmt);
+        r = vsnprintf(buf, PRINTK_BUF_SIZE, fmt, args);
+        va_end(args);
+
+        __this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
+        local_irq_restore(flags);
+
+        return r;
+}
 
 void printk_tick(void)
 {
         if (__this_cpu_read(printk_pending)) {
-                __this_cpu_write(printk_pending, 0);
-                wake_up_interruptible(&log_wait);
+                int pending = __this_cpu_xchg(printk_pending, 0);
+
+                if (pending & PRINTK_PENDING_SCHED) {
+                        char *buf = __get_cpu_var(printk_sched_buf);
+                        printk(KERN_WARNING "[sched_delayed] %s", buf);
+                }
+
+                if (pending & PRINTK_PENDING_WAKEUP)
+                        wake_up_interruptible(&log_wait);
         }
 }
@@ -1228,7 +1262,7 @@ int printk_needs_cpu(int cpu)
 void wake_up_klogd(void)
 {
         if (waitqueue_active(&log_wait))
-                this_cpu_write(printk_pending, 1);
+                this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
 }
@@ -1284,7 +1284,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
          * leave kernel.
          */
         if (p->mm && printk_ratelimit()) {
-                printk(KERN_INFO "process %d (%s) no longer affine to cpu%d\n",
+                printk_sched("process %d (%s) no longer affine to cpu%d\n",
                                 task_pid_nr(p), p->comm, cpu);
         }
@@ -864,8 +864,14 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq)
                 * but accrue some time due to boosting.
                 */
                if (likely(rt_b->rt_runtime)) {
+                        static bool once = false;
+
                        rt_rq->rt_throttled = 1;
-                        printk_once(KERN_WARNING "sched: RT throttling activated\n");
+
+                        if (!once) {
+                                once = true;
+                                printk_sched("sched: RT throttling activated\n");
+                        }
                } else {
                        /*
                         * In case we did anyway, make it go away,