diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 60308053fdb2aeb2aaaa35112096e3b23a8dc2f8..e9bae4c2f2ddabf3c9458338791cd039b160878f 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -14,6 +14,7 @@
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 #include <linux/nmi.h>
+#include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/hardirq.h>
 #include <linux/slab.h>
@@ -82,6 +83,15 @@ __setup("unknown_nmi_panic", setup_unknown_nmi_panic);
 
 #define nmi_to_desc(type) (&nmi_desc[type])
 
+static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC;
+static int __init nmi_warning_debugfs(void)
+{
+	debugfs_create_u64("nmi_longest_ns", 0644,
+			arch_debugfs_dir, &nmi_longest_ns);
+	return 0;
+}
+fs_initcall(nmi_warning_debugfs);
+
 static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b)
 {
 	struct nmi_desc *desc = nmi_to_desc(type);
@@ -96,8 +106,25 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
 	 * can be latched at any given time.  Walk the whole list
	 * to handle those situations.
	 */
-	list_for_each_entry_rcu(a, &desc->head, list)
+	list_for_each_entry_rcu(a, &desc->head, list) {
+		u64 before, delta, whole_msecs;
+		int decimal_msecs;
+
+		before = local_clock();
 		handled += a->handler(type, regs);
+		delta = local_clock() - before;
+
+		if (delta < nmi_longest_ns)
+			continue;
+
+		nmi_longest_ns = delta;
+		whole_msecs = delta;
+		decimal_msecs = do_div(whole_msecs, (1000 * 1000)) / 1000;
+		printk_ratelimited(KERN_INFO
+			"INFO: NMI handler (%ps) took too long to run: "
+			"%lld.%03d msecs\n", a->handler, whole_msecs,
+			decimal_msecs);
+	}
 
 	rcu_read_unlock();
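For reference, below is a minimal userspace sketch (not kernel code) of the duration check the last hunk wraps around each handler: time a callback with a monotonic clock, compare the result against a nanosecond threshold, and report it as whole and fractional milliseconds. clock_gettime() stands in for local_clock(), plain 64-bit division stands in for do_div() (which the patch needs because 32-bit x86 kernels cannot rely on native 64-bit division), and all names here (time_handler, slow_handler, longest_ns) are illustrative, not part of the patch.

/*
 * Userspace sketch of the handler-duration warning logic above.
 * Build with: gcc -O2 -o nmi_timing_sketch nmi_timing_sketch.c
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_MSEC 1000000ULL

/* analogous to nmi_longest_ns: only durations exceeding this are reported */
static uint64_t longest_ns = 1 * NSEC_PER_MSEC;

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void time_handler(void (*handler)(void))
{
	uint64_t before, delta, whole_msecs;
	unsigned int decimal_msecs;

	before = now_ns();
	handler();
	delta = now_ns() - before;

	if (delta < longest_ns)
		return;

	/* raise the threshold so only new records are reported */
	longest_ns = delta;
	whole_msecs = delta / NSEC_PER_MSEC;
	decimal_msecs = (delta % NSEC_PER_MSEC) / 1000;	/* usecs = 3 decimals */
	printf("handler took too long to run: %llu.%03u msecs\n",
	       (unsigned long long)whole_msecs, decimal_msecs);
}

static void slow_handler(void)
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 3 * NSEC_PER_MSEC };

	nanosleep(&ts, NULL);
}

int main(void)
{
	time_handler(slow_handler);
	return 0;
}

With the patch applied and debugfs mounted, the threshold is a plain u64 file created under arch_debugfs_dir, which on x86 normally means it should appear as /sys/kernel/debug/x86/nmi_longest_ns and can be read or raised at runtime without rebuilding.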