Commit 3ac7fe5a authored by Thomas Gleixner, committed by Linus Torvalds

infrastructure to debug (dynamic) objects

We can see an ever repeating problem pattern with objects of any kind in the
kernel:

1) freeing of active objects
2) reinitialization of active objects

Both problems can be hard to debug because the crash happens at a point where
we no longer have a chance to decode the root cause.  One problem spot is
kernel timers, where the problem is often detected in interrupt context and
usually causes the machine to panic.
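
As an illustration of the first pattern, consider a hypothetical driver that
frees a structure which still contains an armed timer (the structure and
function names in this sketch are made up):

    #include <linux/timer.h>
    #include <linux/slab.h>

    /* Hypothetical driver object embedding a kernel timer. */
    struct my_dev {
            struct timer_list timer;
            /* ... */
    };

    static void my_dev_release(struct my_dev *dev)
    {
            /*
             * BUG: if the timer is still pending, the timer wheel keeps a
             * pointer into the memory freed here.  The crash happens much
             * later, when the timer expires in interrupt context, with no
             * obvious connection to this call site.
             */
            kfree(dev);     /* missing del_timer_sync(&dev->timer) */
    }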

While working on a timer-related bug report I had to hack specialized code
into the timer subsystem to get a reasonable hint for the root cause.  This
debug hack was fine for temporary use, but far from a mergeable solution due
to its intrusiveness into the timer code.

The code further lacked the ability to detect and report the root cause
instantly and keep the system operational.

Keeping the system operational is important for getting hold of the debug
information without special debugging aids like serial consoles and without
requiring special knowledge from the bug reporter.

The problems described above are not restricted to timers, but timers tend to
expose them, usually as a full system crash.  Other objects are less
explosive, but the symptoms caused by such mistakes can be even harder to
debug.

Instead of creating specialized debugging code for the timer subsystem, a
generic infrastructure is created which allows developers to verify their code
and provides an easy-to-enable debug facility for users in case of trouble.

The debugobjects core code keeps track of static and dynamic objects by
inserting them into a hashed list and sanity checking them on each object
operation, and it provides additional checks whenever kernel memory is freed.

The tracked object operations are (a usage sketch follows the list):
- initializing an object
- adding an object to a subsystem list
- deleting an object from a subsystem list
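
A minimal sketch of how a subsystem might hook these operations with the
interface added in include/linux/debugobjects.h below; the object type
struct my_obj and the call sites are hypothetical:

    #include <linux/debugobjects.h>

    struct my_obj {
            /* subsystem specific data */
            unsigned long data;
    };

    static struct debug_obj_descr my_obj_debug_descr = {
            .name   = "my_obj",
    };

    static void my_obj_init(struct my_obj *obj)
    {
            debug_object_init(obj, &my_obj_debug_descr);
            /* ... regular initialization ... */
    }

    static void my_obj_add(struct my_obj *obj)
    {
            /* The object is added to the subsystem list, i.e. becomes active. */
            debug_object_activate(obj, &my_obj_debug_descr);
            /* ... enqueue the object ... */
    }

    static void my_obj_del(struct my_obj *obj)
    {
            /* ... dequeue the object ... */
            debug_object_deactivate(obj, &my_obj_debug_descr);
    }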

Each operation is sanity checked before it is executed, and the
subsystem-specific code can provide a fixup function which makes it possible
to prevent damage from the operation.  When the sanity check triggers, a
warning message and a stack trace are printed.
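
Continuing the sketch above, a fixup callback for the free check could look
like this; my_obj_stop() is a hypothetical helper that deactivates the object,
and returning 1 signals to the core that the damage was prevented:

    static int my_obj_fixup_free(void *addr, enum debug_obj_state state)
    {
            struct my_obj *obj = addr;

            switch (state) {
            case ODEBUG_STATE_ACTIVE:
                    /*
                     * The memory of a still active object is about to be
                     * freed: deactivate the object so the free operation
                     * can proceed safely.
                     */
                    my_obj_stop(obj);
                    debug_object_free(obj, &my_obj_debug_descr);
                    return 1;
            default:
                    return 0;
            }
    }

The callback would be hooked up via .fixup_free = my_obj_fixup_free in the
descriptor shown in the previous sketch.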

The list of operations can be extended if the need arises.  For now it's
limited to the requirements of the first user (timers).

The core code enqueues the objects into hash buckets.  The hash index is
generated from the address of the object to simplify the lookup for the check
on kfree/vfree.  Each bucket has its own spinlock to avoid contention on a
global lock.
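
A bucket lookup along these lines illustrates the idea (the constants and
names in this sketch are illustrative, not necessarily the ones used in
lib/debugobjects.c):

    #include <linux/hash.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    #define ODEBUG_HASH_BITS        10
    #define ODEBUG_HASH_SIZE        (1 << ODEBUG_HASH_BITS)

    struct debug_bucket {
            struct hlist_head       list;
            spinlock_t              lock;
    };

    static struct debug_bucket obj_hash[ODEBUG_HASH_SIZE];

    static struct debug_bucket *get_bucket(unsigned long addr)
    {
            /*
             * Hash the object address to pick a bucket; each bucket has
             * its own lock, so lookups on different buckets do not
             * contend with each other.
             */
            return &obj_hash[hash_long(addr, ODEBUG_HASH_BITS)];
    }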

The debug code can be compiled in without being active.  The runtime overhead
is minimal and could be optimized by asm alternatives.  A kernel command line
option enables the debugging code.
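
For example, with CONFIG_DEBUG_OBJECTS=y the checks stay dormant until the
kernel is booted with the debug_objects option documented in the
kernel-parameters.txt hunk below.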

Thanks to Ingo Molnar for review, suggestions and cleanup patches.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Greg KH <greg@kroah.com>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Cc: Kay Sievers <kay.sievers@vrfy.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 30327acf
Documentation/kernel-parameters.txt
@@ -561,6 +561,8 @@ and is between 256 and 4096 characters. It is defined in the file
1 will print _a lot_ more information - normally
only useful to kernel developers.
debug_objects [KNL] Enable object debugging
decnet.addr= [HW,NET]
Format: <area>[,<node>]
See also Documentation/networking/decnet.txt.
include/linux/debugobjects.h (new file)
#ifndef _LINUX_DEBUGOBJECTS_H
#define _LINUX_DEBUGOBJECTS_H
#include <linux/list.h>
#include <linux/spinlock.h>
enum debug_obj_state {
ODEBUG_STATE_NONE,
ODEBUG_STATE_INIT,
ODEBUG_STATE_INACTIVE,
ODEBUG_STATE_ACTIVE,
ODEBUG_STATE_DESTROYED,
ODEBUG_STATE_NOTAVAILABLE,
ODEBUG_STATE_MAX,
};
struct debug_obj_descr;
/**
* struct debug_obj - representation of a tracked object
* @node: hlist node to link the object into the tracker list
* @state: tracked object state
* @object: pointer to the real object
* @descr: pointer to an object type specific debug description structure
*/
struct debug_obj {
struct hlist_node node;
enum debug_obj_state state;
void *object;
struct debug_obj_descr *descr;
};
/**
* struct debug_obj_descr - object type specific debug description structure
* @name: name of the object type
* @fixup_init: fixup function, which is called when the init check
* fails
* @fixup_activate: fixup function, which is called when the activate check
* fails
* @fixup_destroy: fixup function, which is called when the destroy check
* fails
* @fixup_free: fixup function, which is called when the free check
* fails
*/
struct debug_obj_descr {
const char *name;
int (*fixup_init) (void *addr, enum debug_obj_state state);
int (*fixup_activate) (void *addr, enum debug_obj_state state);
int (*fixup_destroy) (void *addr, enum debug_obj_state state);
int (*fixup_free) (void *addr, enum debug_obj_state state);
};
#ifdef CONFIG_DEBUG_OBJECTS
extern void debug_object_init (void *addr, struct debug_obj_descr *descr);
extern void
debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr);
extern void debug_object_activate (void *addr, struct debug_obj_descr *descr);
extern void debug_object_deactivate(void *addr, struct debug_obj_descr *descr);
extern void debug_object_destroy (void *addr, struct debug_obj_descr *descr);
extern void debug_object_free (void *addr, struct debug_obj_descr *descr);
extern void debug_objects_early_init(void);
extern void debug_objects_mem_init(void);
#else
static inline void
debug_object_init (void *addr, struct debug_obj_descr *descr) { }
static inline void
debug_object_init_on_stack(void *addr, struct debug_obj_descr *descr) { }
static inline void
debug_object_activate (void *addr, struct debug_obj_descr *descr) { }
static inline void
debug_object_deactivate(void *addr, struct debug_obj_descr *descr) { }
static inline void
debug_object_destroy (void *addr, struct debug_obj_descr *descr) { }
static inline void
debug_object_free (void *addr, struct debug_obj_descr *descr) { }
static inline void debug_objects_early_init(void) { }
static inline void debug_objects_mem_init(void) { }
#endif
#ifdef CONFIG_DEBUG_OBJECTS_FREE
extern void debug_check_no_obj_freed(const void *address, unsigned long size);
#else
static inline void
debug_check_no_obj_freed(const void *address, unsigned long size) { }
#endif
#endif
init/main.c
@@ -52,6 +52,7 @@
#include <linux/unwind.h>
#include <linux/buffer_head.h>
#include <linux/debug_locks.h>
#include <linux/debugobjects.h>
#include <linux/lockdep.h>
#include <linux/pid_namespace.h>
#include <linux/device.h>
@@ -543,6 +544,7 @@ asmlinkage void __init start_kernel(void)
*/
unwind_init();
lockdep_init();
debug_objects_early_init();
cgroup_init_early();
local_irq_disable();
@@ -638,6 +640,7 @@ asmlinkage void __init start_kernel(void)
enable_debug_pagealloc();
cpu_hotplug_init();
kmem_cache_init();
debug_objects_mem_init();
idr_init_cache();
setup_per_cpu_pageset();
numa_policy_init();
lib/Kconfig.debug
@@ -194,6 +194,29 @@ config TIMER_STATS
(it defaults to deactivated on bootup and will only be activated
if some application like powertop activates it explicitly).
config DEBUG_OBJECTS
bool "Debug object operations"
depends on DEBUG_KERNEL
help
If you say Y here, additional code will be inserted into the
kernel to track the lifetime of various objects and validate
the operations on those objects.
config DEBUG_OBJECTS_SELFTEST
bool "Debug objects selftest"
depends on DEBUG_OBJECTS
help
This enables the selftest of the object debug code.
config DEBUG_OBJECTS_FREE
bool "Debug objects in freed memory"
depends on DEBUG_OBJECTS
help
This enables checking whether a k/v free operation frees an
area which contains an object that has not been deactivated
properly. This can make kmalloc/kfree-intensive workloads
much slower.
config DEBUG_SLAB
bool "Debug slab memory allocations"
depends on DEBUG_KERNEL && SLAB
lib/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
obj-$(CONFIG_PLIST) += plist.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
lib-y += dec_and_lock.o
lib/debugobjects.c (new file; diff collapsed)
mm/page_alloc.c
@@ -45,6 +45,7 @@
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/memcontrol.h>
#include <linux/debugobjects.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -532,8 +533,11 @@ static void __free_pages_ok(struct page *page, unsigned int order)
if (reserved)
return;
if (!PageHighMem(page))
if (!PageHighMem(page)) {
debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
debug_check_no_obj_freed(page_address(page),
PAGE_SIZE << order);
}
arch_free_page(page, order);
kernel_map_pages(page, 1 << order, 0);
@@ -995,8 +999,10 @@ static void free_hot_cold_page(struct page *page, int cold)
if (free_pages_check(page))
return;
if (!PageHighMem(page))
if (!PageHighMem(page)) {
debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
}
arch_free_page(page, 0);
kernel_map_pages(page, 1, 0);
mm/slab.c
@@ -110,6 +110,7 @@
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
@@ -174,12 +175,14 @@
SLAB_CACHE_DMA | \
SLAB_STORE_USER | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
SLAB_DEBUG_OBJECTS)
#else
# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
SLAB_CACHE_DMA | \
SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
SLAB_DEBUG_OBJECTS)
#endif
/*
@@ -3760,6 +3763,8 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
local_irq_save(flags);
debug_check_no_locks_freed(objp, obj_size(cachep));
if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(objp, obj_size(cachep));
__cache_free(cachep, objp);
local_irq_restore(flags);
}
@@ -3785,6 +3790,7 @@ void kfree(const void *objp)
kfree_debugcheck(objp);
c = virt_to_cache(objp);
debug_check_no_locks_freed(objp, obj_size(c));
debug_check_no_obj_freed(objp, obj_size(c));
__cache_free(c, (void *)objp);
local_irq_restore(flags);
}
mm/slub.c
@@ -19,6 +19,7 @@
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
@@ -1747,6 +1748,8 @@ static __always_inline void slab_free(struct kmem_cache *s,
local_irq_save(flags);
c = get_cpu_slab(s, smp_processor_id());
debug_check_no_locks_freed(object, c->objsize);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
debug_check_no_obj_freed(object, s->objsize);
if (likely(page == c->page && c->node >= 0)) {
object[c->offset] = c->freelist;
c->freelist = object;
mm/vmalloc.c
@@ -15,6 +15,7 @@
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugobjects.h>
#include <linux/vmalloc.h>
#include <linux/kallsyms.h>
@@ -394,6 +395,7 @@ static void __vunmap(const void *addr, int deallocate_pages)
}
debug_check_no_locks_freed(addr, area->size);
debug_check_no_obj_freed(addr, area->size);
if (deallocate_pages) {
int i;