提交 6727ad9e 编写于 作者: C Chris Metcalf 提交者: Linus Torvalds

nmi_backtrace: generate one-line reports for idle cpus

When doing an nmi backtrace of many cores, most of which are idle, the
output is a little overwhelming and very uninformative.  Suppress
messages for cpus that are idling when they are interrupted and just
emit one line, "NMI backtrace for N skipped: idling at pc 0xNNN".

We do this by grouping all the cpuidle code together into a new
.cpuidle.text section, and then checking the address of the interrupted
PC to see if it lies within that section.

This commit suitably tags x86 and tile idle routines, and only adds in
the minimal framework for other architectures.

Link: http://lkml.kernel.org/r/1472487169-14923-5-git-send-email-cmetcalf@mellanox.com
Signed-off-by: Chris Metcalf <cmetcalf@mellanox.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Daniel Thompson <daniel.thompson@linaro.org> [arm]
Tested-by: Petr Mladek <pmladek@suse.com>
Cc: Aaron Tomlin <atomlin@redhat.com>
Cc: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: "Rafael J. Wysocki" <rjw@rjwysocki.net>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 511f8389
...@@ -22,6 +22,7 @@ SECTIONS ...@@ -22,6 +22,7 @@ SECTIONS
HEAD_TEXT HEAD_TEXT
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
*(.fixup) *(.fixup)
*(.gnu.warning) *(.gnu.warning)
......
...@@ -89,6 +89,7 @@ SECTIONS ...@@ -89,6 +89,7 @@ SECTIONS
_text = .; _text = .;
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
*(.fixup) *(.fixup)
......
...@@ -98,6 +98,7 @@ SECTIONS ...@@ -98,6 +98,7 @@ SECTIONS
IRQENTRY_TEXT IRQENTRY_TEXT
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
*(.gnu.warning) *(.gnu.warning)
......
...@@ -111,6 +111,7 @@ SECTIONS ...@@ -111,6 +111,7 @@ SECTIONS
SOFTIRQENTRY_TEXT SOFTIRQENTRY_TEXT
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
HYPERVISOR_TEXT HYPERVISOR_TEXT
KPROBES_TEXT KPROBES_TEXT
......
...@@ -122,6 +122,7 @@ SECTIONS ...@@ -122,6 +122,7 @@ SECTIONS
ENTRY_TEXT ENTRY_TEXT
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
HYPERVISOR_TEXT HYPERVISOR_TEXT
......
...@@ -52,6 +52,7 @@ SECTIONS ...@@ -52,6 +52,7 @@ SECTIONS
KPROBES_TEXT KPROBES_TEXT
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
*(.fixup) *(.fixup)
*(.gnu.warning) *(.gnu.warning)
......
...@@ -33,6 +33,7 @@ SECTIONS ...@@ -33,6 +33,7 @@ SECTIONS
#ifndef CONFIG_SCHEDULE_L1 #ifndef CONFIG_SCHEDULE_L1
SCHED_TEXT SCHED_TEXT
#endif #endif
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
IRQENTRY_TEXT IRQENTRY_TEXT
SOFTIRQENTRY_TEXT SOFTIRQENTRY_TEXT
......
...@@ -70,6 +70,7 @@ SECTIONS ...@@ -70,6 +70,7 @@ SECTIONS
_stext = .; _stext = .;
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
IRQENTRY_TEXT IRQENTRY_TEXT
SOFTIRQENTRY_TEXT SOFTIRQENTRY_TEXT
......
...@@ -43,6 +43,7 @@ SECTIONS ...@@ -43,6 +43,7 @@ SECTIONS
HEAD_TEXT HEAD_TEXT
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
*(.fixup) *(.fixup)
*(.text.__*) *(.text.__*)
......
...@@ -63,6 +63,7 @@ SECTIONS ...@@ -63,6 +63,7 @@ SECTIONS
*(.text..tlbmiss) *(.text..tlbmiss)
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
#ifdef CONFIG_DEBUG_INFO #ifdef CONFIG_DEBUG_INFO
INIT_TEXT INIT_TEXT
......
...@@ -29,6 +29,7 @@ SECTIONS ...@@ -29,6 +29,7 @@ SECTIONS
_stext = . ; _stext = . ;
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
#if defined(CONFIG_ROMKERNEL) #if defined(CONFIG_ROMKERNEL)
*(.int_redirect) *(.int_redirect)
......
...@@ -50,6 +50,7 @@ SECTIONS ...@@ -50,6 +50,7 @@ SECTIONS
_text = .; _text = .;
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
*(.fixup) *(.fixup)
......
...@@ -46,6 +46,7 @@ SECTIONS { ...@@ -46,6 +46,7 @@ SECTIONS {
__end_ivt_text = .; __end_ivt_text = .;
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
*(.gnu.linkonce.t*) *(.gnu.linkonce.t*)
......
...@@ -31,6 +31,7 @@ SECTIONS ...@@ -31,6 +31,7 @@ SECTIONS
HEAD_TEXT HEAD_TEXT
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
*(.fixup) *(.fixup)
*(.gnu.warning) *(.gnu.warning)
......
...@@ -45,6 +45,7 @@ SECTIONS { ...@@ -45,6 +45,7 @@ SECTIONS {
HEAD_TEXT HEAD_TEXT
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
*(.fixup) *(.fixup)
. = ALIGN(16); . = ALIGN(16);
......
...@@ -16,6 +16,7 @@ SECTIONS ...@@ -16,6 +16,7 @@ SECTIONS
HEAD_TEXT HEAD_TEXT
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
*(.fixup) *(.fixup)
*(.gnu.warning) *(.gnu.warning)
......
...@@ -16,6 +16,7 @@ SECTIONS ...@@ -16,6 +16,7 @@ SECTIONS
HEAD_TEXT HEAD_TEXT
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
*(.fixup) *(.fixup)
*(.gnu.warning) *(.gnu.warning)
......
...@@ -21,6 +21,7 @@ SECTIONS ...@@ -21,6 +21,7 @@ SECTIONS
.text : { .text : {
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
IRQENTRY_TEXT IRQENTRY_TEXT
......
...@@ -33,6 +33,7 @@ SECTIONS { ...@@ -33,6 +33,7 @@ SECTIONS {
EXIT_TEXT EXIT_TEXT
EXIT_CALL EXIT_CALL
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
IRQENTRY_TEXT IRQENTRY_TEXT
......
...@@ -55,6 +55,7 @@ SECTIONS ...@@ -55,6 +55,7 @@ SECTIONS
.text : { .text : {
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
IRQENTRY_TEXT IRQENTRY_TEXT
......
...@@ -30,6 +30,7 @@ SECTIONS ...@@ -30,6 +30,7 @@ SECTIONS
HEAD_TEXT HEAD_TEXT
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
*(.fixup) *(.fixup)
......
...@@ -37,6 +37,7 @@ SECTIONS ...@@ -37,6 +37,7 @@ SECTIONS
.text : { .text : {
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
IRQENTRY_TEXT IRQENTRY_TEXT
SOFTIRQENTRY_TEXT SOFTIRQENTRY_TEXT
......
...@@ -47,6 +47,7 @@ SECTIONS ...@@ -47,6 +47,7 @@ SECTIONS
_stext = .; _stext = .;
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
IRQENTRY_TEXT IRQENTRY_TEXT
......
...@@ -69,6 +69,7 @@ SECTIONS ...@@ -69,6 +69,7 @@ SECTIONS
.text ALIGN(PAGE_SIZE) : { .text ALIGN(PAGE_SIZE) : {
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
IRQENTRY_TEXT IRQENTRY_TEXT
......
...@@ -52,6 +52,7 @@ SECTIONS ...@@ -52,6 +52,7 @@ SECTIONS
/* careful! __ftr_alt_* sections need to be close to .text */ /* careful! __ftr_alt_* sections need to be close to .text */
*(.text .fixup __ftr_alt_* .ref.text) *(.text .fixup __ftr_alt_* .ref.text)
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
IRQENTRY_TEXT IRQENTRY_TEXT
......
...@@ -35,6 +35,7 @@ SECTIONS ...@@ -35,6 +35,7 @@ SECTIONS
HEAD_TEXT HEAD_TEXT
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
IRQENTRY_TEXT IRQENTRY_TEXT
......
...@@ -40,6 +40,7 @@ SECTIONS ...@@ -40,6 +40,7 @@ SECTIONS
_text = .; /* Text and read-only data */ _text = .; /* Text and read-only data */
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
*(.text.*) *(.text.*)
......
...@@ -36,6 +36,7 @@ SECTIONS ...@@ -36,6 +36,7 @@ SECTIONS
TEXT_TEXT TEXT_TEXT
EXTRA_TEXT EXTRA_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
IRQENTRY_TEXT IRQENTRY_TEXT
......
...@@ -49,6 +49,7 @@ SECTIONS ...@@ -49,6 +49,7 @@ SECTIONS
HEAD_TEXT HEAD_TEXT
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
IRQENTRY_TEXT IRQENTRY_TEXT
......
...@@ -50,7 +50,7 @@ STD_ENTRY(smp_nap) ...@@ -50,7 +50,7 @@ STD_ENTRY(smp_nap)
* When interrupted at _cpu_idle_nap, we bump the PC forward 8, and * When interrupted at _cpu_idle_nap, we bump the PC forward 8, and
* as a result return to the function that called _cpu_idle(). * as a result return to the function that called _cpu_idle().
*/ */
STD_ENTRY(_cpu_idle) STD_ENTRY_SECTION(_cpu_idle, .cpuidle.text)
movei r1, 1 movei r1, 1
IRQ_ENABLE_LOAD(r2, r3) IRQ_ENABLE_LOAD(r2, r3)
mtspr INTERRUPT_CRITICAL_SECTION, r1 mtspr INTERRUPT_CRITICAL_SECTION, r1
......
...@@ -42,6 +42,7 @@ SECTIONS ...@@ -42,6 +42,7 @@ SECTIONS
.text : AT (ADDR(.text) - LOAD_OFFSET) { .text : AT (ADDR(.text) - LOAD_OFFSET) {
HEAD_TEXT HEAD_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
IRQENTRY_TEXT IRQENTRY_TEXT
......
...@@ -68,6 +68,7 @@ SECTIONS ...@@ -68,6 +68,7 @@ SECTIONS
_stext = .; _stext = .;
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
*(.fixup) *(.fixup)
*(.stub .text.* .gnu.linkonce.t.*) *(.stub .text.* .gnu.linkonce.t.*)
......
...@@ -28,6 +28,7 @@ SECTIONS ...@@ -28,6 +28,7 @@ SECTIONS
_stext = .; _stext = .;
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
*(.fixup) *(.fixup)
/* .gnu.warning sections are handled specially by elf32.em. */ /* .gnu.warning sections are handled specially by elf32.em. */
......
...@@ -37,6 +37,7 @@ SECTIONS ...@@ -37,6 +37,7 @@ SECTIONS
.text : { /* Real text segment */ .text : { /* Real text segment */
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
*(.fixup) *(.fixup)
......
...@@ -4,6 +4,10 @@ ...@@ -4,6 +4,10 @@
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
/* Provide __cpuidle; we can't safely include <linux/cpu.h> */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))
/* /*
* Interrupt control: * Interrupt control:
*/ */
...@@ -44,12 +48,12 @@ static inline void native_irq_enable(void) ...@@ -44,12 +48,12 @@ static inline void native_irq_enable(void)
asm volatile("sti": : :"memory"); asm volatile("sti": : :"memory");
} }
static inline void native_safe_halt(void) static inline __cpuidle void native_safe_halt(void)
{ {
asm volatile("sti; hlt": : :"memory"); asm volatile("sti; hlt": : :"memory");
} }
static inline void native_halt(void) static inline __cpuidle void native_halt(void)
{ {
asm volatile("hlt": : :"memory"); asm volatile("hlt": : :"memory");
} }
...@@ -86,7 +90,7 @@ static inline notrace void arch_local_irq_enable(void) ...@@ -86,7 +90,7 @@ static inline notrace void arch_local_irq_enable(void)
* Used in the idle loop; sti takes one instruction cycle * Used in the idle loop; sti takes one instruction cycle
* to complete: * to complete:
*/ */
static inline void arch_safe_halt(void) static inline __cpuidle void arch_safe_halt(void)
{ {
native_safe_halt(); native_safe_halt();
} }
...@@ -95,7 +99,7 @@ static inline void arch_safe_halt(void) ...@@ -95,7 +99,7 @@ static inline void arch_safe_halt(void)
* Used when interrupts are already enabled or to * Used when interrupts are already enabled or to
* shutdown the processor: * shutdown the processor:
*/ */
static inline void halt(void) static inline __cpuidle void halt(void)
{ {
native_halt(); native_halt();
} }
......
...@@ -152,7 +152,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu, ...@@ -152,7 +152,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
} }
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe); EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);
void acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx) void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{ {
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
struct cstate_entry *percpu_entry; struct cstate_entry *percpu_entry;
......
...@@ -302,7 +302,7 @@ void arch_cpu_idle(void) ...@@ -302,7 +302,7 @@ void arch_cpu_idle(void)
/* /*
* We use this if we don't have any better idle routine.. * We use this if we don't have any better idle routine..
*/ */
void default_idle(void) void __cpuidle default_idle(void)
{ {
trace_cpu_idle_rcuidle(1, smp_processor_id()); trace_cpu_idle_rcuidle(1, smp_processor_id());
safe_halt(); safe_halt();
...@@ -417,7 +417,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c) ...@@ -417,7 +417,7 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
* with interrupts enabled and no flags, which is backwards compatible with the * with interrupts enabled and no flags, which is backwards compatible with the
* original MWAIT implementation. * original MWAIT implementation.
*/ */
static void mwait_idle(void) static __cpuidle void mwait_idle(void)
{ {
if (!current_set_polling_and_test()) { if (!current_set_polling_and_test()) {
trace_cpu_idle_rcuidle(1, smp_processor_id()); trace_cpu_idle_rcuidle(1, smp_processor_id());
......
...@@ -97,6 +97,7 @@ SECTIONS ...@@ -97,6 +97,7 @@ SECTIONS
_stext = .; _stext = .;
TEXT_TEXT TEXT_TEXT
SCHED_TEXT SCHED_TEXT
CPUIDLE_TEXT
LOCK_TEXT LOCK_TEXT
KPROBES_TEXT KPROBES_TEXT
ENTRY_TEXT ENTRY_TEXT
......
...@@ -89,6 +89,9 @@ SECTIONS ...@@ -89,6 +89,9 @@ SECTIONS
VMLINUX_SYMBOL(__sched_text_start) = .; VMLINUX_SYMBOL(__sched_text_start) = .;
*(.sched.literal .sched.text) *(.sched.literal .sched.text)
VMLINUX_SYMBOL(__sched_text_end) = .; VMLINUX_SYMBOL(__sched_text_end) = .;
VMLINUX_SYMBOL(__cpuidle_text_start) = .;
*(.cpuidle.literal .cpuidle.text)
VMLINUX_SYMBOL(__cpuidle_text_end) = .;
VMLINUX_SYMBOL(__lock_text_start) = .; VMLINUX_SYMBOL(__lock_text_start) = .;
*(.spinlock.literal .spinlock.text) *(.spinlock.literal .spinlock.text)
VMLINUX_SYMBOL(__lock_text_end) = .; VMLINUX_SYMBOL(__lock_text_end) = .;
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <linux/sched.h> /* need_resched() */ #include <linux/sched.h> /* need_resched() */
#include <linux/tick.h> #include <linux/tick.h>
#include <linux/cpuidle.h> #include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <acpi/processor.h> #include <acpi/processor.h>
/* /*
...@@ -115,7 +116,7 @@ static const struct dmi_system_id processor_power_dmi_table[] = { ...@@ -115,7 +116,7 @@ static const struct dmi_system_id processor_power_dmi_table[] = {
* Callers should disable interrupts before the call and enable * Callers should disable interrupts before the call and enable
* interrupts after return. * interrupts after return.
*/ */
static void acpi_safe_halt(void) static void __cpuidle acpi_safe_halt(void)
{ {
if (!tif_need_resched()) { if (!tif_need_resched()) {
safe_halt(); safe_halt();
...@@ -645,7 +646,7 @@ static int acpi_idle_bm_check(void) ...@@ -645,7 +646,7 @@ static int acpi_idle_bm_check(void)
* *
* Caller disables interrupt before call and enables interrupt after return. * Caller disables interrupt before call and enables interrupt after return.
*/ */
static void acpi_idle_do_entry(struct acpi_processor_cx *cx) static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{ {
if (cx->entry_method == ACPI_CSTATE_FFH) { if (cx->entry_method == ACPI_CSTATE_FFH) {
/* Call into architectural FFH based C-state */ /* Call into architectural FFH based C-state */
......
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/cpuidle.h> #include <linux/cpuidle.h>
#include <linux/cpumask.h> #include <linux/cpumask.h>
#include <linux/tick.h> #include <linux/tick.h>
#include <linux/cpu.h>
#include "cpuidle.h" #include "cpuidle.h"
...@@ -178,8 +179,8 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv) ...@@ -178,8 +179,8 @@ static void __cpuidle_driver_init(struct cpuidle_driver *drv)
} }
#ifdef CONFIG_ARCH_HAS_CPU_RELAX #ifdef CONFIG_ARCH_HAS_CPU_RELAX
static int poll_idle(struct cpuidle_device *dev, static int __cpuidle poll_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index) struct cpuidle_driver *drv, int index)
{ {
local_irq_enable(); local_irq_enable();
if (!current_set_polling_and_test()) { if (!current_set_polling_and_test()) {
......
...@@ -863,8 +863,8 @@ static struct cpuidle_state dnv_cstates[] = { ...@@ -863,8 +863,8 @@ static struct cpuidle_state dnv_cstates[] = {
* *
* Must be called under local_irq_disable(). * Must be called under local_irq_disable().
*/ */
static int intel_idle(struct cpuidle_device *dev, static __cpuidle int intel_idle(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index) struct cpuidle_driver *drv, int index)
{ {
unsigned long ecx = 1; /* break on interrupt flag */ unsigned long ecx = 1; /* break on interrupt flag */
struct cpuidle_state *state = &drv->states[index]; struct cpuidle_state *state = &drv->states[index];
......
...@@ -454,6 +454,12 @@ ...@@ -454,6 +454,12 @@
*(.spinlock.text) \ *(.spinlock.text) \
VMLINUX_SYMBOL(__lock_text_end) = .; VMLINUX_SYMBOL(__lock_text_end) = .;
#define CPUIDLE_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__cpuidle_text_start) = .; \
*(.cpuidle.text) \
VMLINUX_SYMBOL(__cpuidle_text_end) = .;
#define KPROBES_TEXT \ #define KPROBES_TEXT \
ALIGN_FUNCTION(); \ ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__kprobes_text_start) = .; \ VMLINUX_SYMBOL(__kprobes_text_start) = .; \
......
...@@ -231,6 +231,11 @@ void cpu_startup_entry(enum cpuhp_state state); ...@@ -231,6 +231,11 @@ void cpu_startup_entry(enum cpuhp_state state);
void cpu_idle_poll_ctrl(bool enable); void cpu_idle_poll_ctrl(bool enable);
/* Attach to any functions which should be considered cpuidle. */
#define __cpuidle __attribute__((__section__(".cpuidle.text")))
bool cpu_in_idle(unsigned long pc);
void arch_cpu_idle(void); void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void); void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void); void arch_cpu_idle_enter(void);
......
...@@ -16,6 +16,9 @@ ...@@ -16,6 +16,9 @@
#include "sched.h" #include "sched.h"
/* Linker adds these: start and end of __cpuidle functions */
extern char __cpuidle_text_start[], __cpuidle_text_end[];
/** /**
* sched_idle_set_state - Record idle state for the current CPU. * sched_idle_set_state - Record idle state for the current CPU.
* @idle_state: State to record. * @idle_state: State to record.
...@@ -53,7 +56,7 @@ static int __init cpu_idle_nopoll_setup(char *__unused) ...@@ -53,7 +56,7 @@ static int __init cpu_idle_nopoll_setup(char *__unused)
__setup("hlt", cpu_idle_nopoll_setup); __setup("hlt", cpu_idle_nopoll_setup);
#endif #endif
static inline int cpu_idle_poll(void) static noinline int __cpuidle cpu_idle_poll(void)
{ {
rcu_idle_enter(); rcu_idle_enter();
trace_cpu_idle_rcuidle(0, smp_processor_id()); trace_cpu_idle_rcuidle(0, smp_processor_id());
...@@ -84,7 +87,7 @@ void __weak arch_cpu_idle(void) ...@@ -84,7 +87,7 @@ void __weak arch_cpu_idle(void)
* *
* To use when the cpuidle framework cannot be used. * To use when the cpuidle framework cannot be used.
*/ */
void default_idle_call(void) void __cpuidle default_idle_call(void)
{ {
if (current_clr_polling_and_test()) { if (current_clr_polling_and_test()) {
local_irq_enable(); local_irq_enable();
...@@ -271,6 +274,12 @@ static void cpu_idle_loop(void) ...@@ -271,6 +274,12 @@ static void cpu_idle_loop(void)
} }
} }
bool cpu_in_idle(unsigned long pc)
{
return pc >= (unsigned long)__cpuidle_text_start &&
pc < (unsigned long)__cpuidle_text_end;
}
void cpu_startup_entry(enum cpuhp_state state) void cpu_startup_entry(enum cpuhp_state state)
{ {
/* /*
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <linux/nmi.h> #include <linux/nmi.h>
#include <linux/cpu.h>
#ifdef arch_trigger_cpumask_backtrace #ifdef arch_trigger_cpumask_backtrace
/* For reliability, we're prepared to waste bits here. */ /* For reliability, we're prepared to waste bits here. */
...@@ -87,11 +88,16 @@ bool nmi_cpu_backtrace(struct pt_regs *regs) ...@@ -87,11 +88,16 @@ bool nmi_cpu_backtrace(struct pt_regs *regs)
int cpu = smp_processor_id(); int cpu = smp_processor_id();
if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
pr_warn("NMI backtrace for cpu %d\n", cpu); if (regs && cpu_in_idle(instruction_pointer(regs))) {
if (regs) pr_warn("NMI backtrace for cpu %d skipped: idling at pc %#lx\n",
show_regs(regs); cpu, instruction_pointer(regs));
else } else {
dump_stack(); pr_warn("NMI backtrace for cpu %d\n", cpu);
if (regs)
show_regs(regs);
else
dump_stack();
}
cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
return true; return true;
} }
......
...@@ -888,7 +888,7 @@ static void check_section(const char *modname, struct elf_info *elf, ...@@ -888,7 +888,7 @@ static void check_section(const char *modname, struct elf_info *elf,
#define DATA_SECTIONS ".data", ".data.rel" #define DATA_SECTIONS ".data", ".data.rel"
#define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \ #define TEXT_SECTIONS ".text", ".text.unlikely", ".sched.text", \
".kprobes.text" ".kprobes.text", ".cpuidle.text"
#define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \ #define OTHER_TEXT_SECTIONS ".ref.text", ".head.text", ".spinlock.text", \
".fixup", ".entry.text", ".exception.text", ".text.*", \ ".fixup", ".entry.text", ".exception.text", ".text.*", \
".coldtext" ".coldtext"
......
...@@ -365,6 +365,7 @@ is_mcounted_section_name(char const *const txtname) ...@@ -365,6 +365,7 @@ is_mcounted_section_name(char const *const txtname)
strcmp(".irqentry.text", txtname) == 0 || strcmp(".irqentry.text", txtname) == 0 ||
strcmp(".softirqentry.text", txtname) == 0 || strcmp(".softirqentry.text", txtname) == 0 ||
strcmp(".kprobes.text", txtname) == 0 || strcmp(".kprobes.text", txtname) == 0 ||
strcmp(".cpuidle.text", txtname) == 0 ||
strcmp(".text.unlikely", txtname) == 0; strcmp(".text.unlikely", txtname) == 0;
} }
......
...@@ -136,6 +136,7 @@ my %text_sections = ( ...@@ -136,6 +136,7 @@ my %text_sections = (
".irqentry.text" => 1, ".irqentry.text" => 1,
".softirqentry.text" => 1, ".softirqentry.text" => 1,
".kprobes.text" => 1, ".kprobes.text" => 1,
".cpuidle.text" => 1,
".text.unlikely" => 1, ".text.unlikely" => 1,
); );
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册