Commit 6c9692e2 authored by Peter Zijlstra, committed by Rusty Russell

module: Make the mod_tree stuff conditional on PERF_EVENTS || TRACING

Andrew worried about the overhead on small systems; only use the fancy
code when either perf or tracing is enabled.

Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Steven Rostedt <rostedt@goodmis.org>
Requested-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Parent 93c2e105
@@ -282,7 +282,7 @@ struct module {
 	 *
 	 * Cacheline align here, such that:
 	 *   module_init, module_core, init_size, core_size,
-	 *   init_text_size, core_text_size and ltn_core.node[0]
+	 *   init_text_size, core_text_size and mtn_core::{mod,node[0]}
 	 * are on the same cacheline.
 	 */
 	void *module_init ____cacheline_aligned;
@@ -296,6 +296,7 @@ struct module {
 	/* The size of the executable code in each section. */
 	unsigned int init_text_size, core_text_size;
 
+#ifdef CONFIG_MODULES_TREE_LOOKUP
 	/*
 	 * We want mtn_core::{mod,node[0]} to be in the same cacheline as the
 	 * above entries such that a regular lookup will only touch one
@@ -303,6 +304,7 @@ struct module {
 	 */
 	struct mod_tree_node mtn_core;
 	struct mod_tree_node mtn_init;
+#endif
 
 	/* Size of RO sections of the module (text+rodata) */
 	unsigned int init_ro_size, core_ro_size;
......
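For context: mtn_core and mtn_init above are struct mod_tree_node instances. Its
definition (introduced by the parent commit; reproduced here from memory as a
sketch for the reader, not part of this patch) pairs a back-pointer to the module
with the latch node, which is why the comment cares about mtn_core::{mod,node[0]}
sharing the hot cacheline:

	struct mod_tree_node {
		struct module *mod;		/* back-pointer used by the tree lookup */
		struct latch_tree_node node;	/* two rb_nodes, one per latched tree copy */
	};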
@@ -1989,6 +1989,10 @@ endchoice
 
 endif # MODULES
 
+config MODULES_TREE_LOOKUP
+	def_bool y
+	depends on PERF_EVENTS || TRACING
+
 config INIT_ALL_POSSIBLE
 	bool
 	help
......
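Kconfig note: def_bool y plus depends on defines a non-user-visible symbol that
evaluates to y exactly when PERF_EVENTS or TRACING is enabled, so no defconfig
changes are needed. A minimal illustration of the compile-time effect (the
generated-header content below follows the usual CONFIG_ convention and is
illustrative, not copied from a build):

	/* include/generated/autoconf.h, with CONFIG_PERF_EVENTS=y (illustrative) */
	#define CONFIG_PERF_EVENTS 1
	#define CONFIG_MODULES_TREE_LOOKUP 1	/* absent when !PERF_EVENTS && !TRACING */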
@@ -102,6 +102,8 @@ DEFINE_MUTEX(module_mutex);
 EXPORT_SYMBOL_GPL(module_mutex);
 static LIST_HEAD(modules);
 
+#ifdef CONFIG_MODULES_TREE_LOOKUP
+
 /*
  * Use a latched RB-tree for __module_address(); this allows us to use
  * RCU-sched lookups of the address from any context.
@@ -112,6 +114,10 @@ static LIST_HEAD(modules);
  *
  * Because init ranges are short lived we mark them unlikely and have placed
  * them outside the critical cacheline in struct module.
+ *
+ * This is conditional on PERF_EVENTS || TRACING because those can really hit
+ * __module_address() hard by doing a lot of stack unwinding; potentially from
+ * NMI context.
  */
 
 static __always_inline unsigned long __mod_tree_val(struct latch_tree_node *n)
@@ -192,7 +198,7 @@ static void mod_tree_remove(struct module *mod)
 	mod_tree_remove_init(mod);
 }
 
-static struct module *mod_tree_find(unsigned long addr)
+static struct module *mod_find(unsigned long addr)
 {
 	struct latch_tree_node *ltn;
 
@@ -203,6 +209,26 @@ static struct module *mod_tree_find(unsigned long addr)
 	return container_of(ltn, struct mod_tree_node, node)->mod;
 }
 
+#else /* MODULES_TREE_LOOKUP */
+
+static void mod_tree_insert(struct module *mod) { }
+static void mod_tree_remove_init(struct module *mod) { }
+static void mod_tree_remove(struct module *mod) { }
+
+static struct module *mod_find(unsigned long addr)
+{
+	struct module *mod;
+
+	list_for_each_entry_rcu(mod, &modules, list) {
+		if (within_module(addr, mod))
+			return mod;
+	}
+
+	return NULL;
+}
+
+#endif /* MODULES_TREE_LOOKUP */
+
 #ifdef CONFIG_KGDB_KDB
 struct list_head *kdb_modules = &modules;	/* kdb needs the list of modules */
 #endif /* CONFIG_KGDB_KDB */
@@ -3966,7 +3992,7 @@ struct module *__module_address(unsigned long addr)
 
 	module_assert_mutex_or_preempt();
 
-	mod = mod_tree_find(addr);
+	mod = mod_find(addr);
 	if (mod) {
 		BUG_ON(!within_module(addr, mod));
 		if (mod->state == MODULE_STATE_UNFORMED)
......
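Usage-wise nothing changes for callers: __module_address() keeps its contract
(module_mutex held or preemption disabled, per module_assert_mutex_or_preempt()
above); only the lookup cost differs — O(log n) via the latched RB-tree with
MODULES_TREE_LOOKUP=y, versus an O(n) RCU walk of the module list without it.
A hedged caller sketch, not taken from the patch:

	struct module *mod;

	preempt_disable();
	mod = __module_address(addr);		/* tree or list walk, per config */
	if (mod)
		pr_info("%lx is in module %s\n", addr, mod->name);
	preempt_enable();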