Commit 74a0b576 authored by Christoph Hellwig, committed by Linus Torvalds

x86: optimize page faults like all other architectures and kill notifier cruft

x86(-64) are the last architectures still using the page fault notifier
cruft for the kprobes page fault hook.  This patch converts them to the
proper direct calls, and removes the now unused pagefault notifier bits
as well as the cruft in kprobes.c that was related to this mess.

I know Andi didn't really like this, but all other architecture maintainers
agreed the direct calls are much better, and besides the obvious cruft
removal, a common way of dealing with kprobes across architectures is
important as well.
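
[Editor's note: the change swaps a global notifier chain for one direct, inlineable call. A condensed before/after sketch in kernel-style C, pieced together from the diff below; it is not compilable on its own, and the x86-64 twin (which uses user_mode() instead of user_mode_vm()) is elided:

    /* Before: every page fault walked an atomic notifier chain, even
     * when no probes were registered. */
    if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
            return;

    /* After: do_page_fault() calls straight into kprobes, and the hook
     * compiles away entirely when CONFIG_KPROBES is off. */
    #ifdef CONFIG_KPROBES
    static inline int notify_page_fault(struct pt_regs *regs)
    {
            int ret = 0;

            /* kprobe_running() needs smp_processor_id() */
            if (!user_mode_vm(regs)) {
                    preempt_disable();
                    if (kprobe_running() && kprobe_fault_handler(regs, 14))
                            ret = 1;
                    preempt_enable();
            }
            return ret;
    }
    #else
    static inline int notify_page_fault(struct pt_regs *regs)
    {
            return 0;
    }
    #endif
]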

[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix sparc64]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Cc: Andi Kleen <ak@suse.de>
Cc: <linux-arch@vger.kernel.org>
Cc: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent d5a7430d
@@ -584,7 +584,7 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
 	return 1;
 }
 
-static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
 {
 	struct kprobe *cur = kprobe_running();
 	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -666,7 +666,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 		ret = NOTIFY_STOP;
 		break;
 	case DIE_GPF:
-	case DIE_PAGE_FAULT:
 		/* kprobe_running() needs smp_processor_id() */
 		preempt_disable();
 		if (kprobe_running() &&
......
@@ -657,7 +657,6 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
 		ret = NOTIFY_STOP;
 		break;
 	case DIE_GPF:
-	case DIE_PAGE_FAULT:
 		/* kprobe_running() needs smp_processor_id() */
 		preempt_disable();
 		if (kprobe_running() &&
......
@@ -25,6 +25,7 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/desc.h>
@@ -32,33 +33,27 @@
 
 extern void die(const char *,struct pt_regs *,long);
 
-static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-
-int register_page_fault_notifier(struct notifier_block *nb)
+#ifdef CONFIG_KPROBES
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-	vmalloc_sync_all();
-	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
-EXPORT_SYMBOL_GPL(register_page_fault_notifier);
+	int ret = 0;
+
+	/* kprobe_running() needs smp_processor_id() */
+	if (!user_mode_vm(regs)) {
+		preempt_disable();
+		if (kprobe_running() && kprobe_fault_handler(regs, 14))
+			ret = 1;
+		preempt_enable();
+	}
 
-int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
-
-static inline int notify_page_fault(struct pt_regs *regs, long err)
+#else
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-	struct die_args args = {
-		.regs = regs,
-		.str = "page fault",
-		.err = err,
-		.trapnr = 14,
-		.signr = SIGSEGV
-	};
-	return atomic_notifier_call_chain(&notify_page_fault_chain,
-					  DIE_PAGE_FAULT, &args);
+	return 0;
 }
+#endif
 
 /*
  * Return EIP plus the CS segment base. The segment limit is also
@@ -331,7 +326,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
 	if (unlikely(address >= TASK_SIZE)) {
 		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
 			return;
-		if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+		if (notify_page_fault(regs))
 			return;
 		/*
 		 * Don't take the mm semaphore here. If we fixup a prefetch
@@ -340,7 +335,7 @@ fastcall void __kprobes do_page_fault(struct pt_regs *regs,
 		goto bad_area_nosemaphore;
 	}
 
-	if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+	if (notify_page_fault(regs))
 		return;
 
 	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
......
@@ -25,6 +25,7 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/kdebug.h>
+#include <linux/kprobes.h>
 
 #include <asm/system.h>
 #include <asm/pgalloc.h>
@@ -40,34 +41,27 @@
 #define PF_RSVD	(1<<3)
 #define PF_INSTR	(1<<4)
 
-static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
-
-/* Hook to register for page fault notifications */
-int register_page_fault_notifier(struct notifier_block *nb)
+#ifdef CONFIG_KPROBES
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-	vmalloc_sync_all();
-	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
-}
-EXPORT_SYMBOL_GPL(register_page_fault_notifier);
+	int ret = 0;
+
+	/* kprobe_running() needs smp_processor_id() */
+	if (!user_mode(regs)) {
+		preempt_disable();
+		if (kprobe_running() && kprobe_fault_handler(regs, 14))
+			ret = 1;
+		preempt_enable();
+	}
 
-int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+	return ret;
 }
-EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
-
-static inline int notify_page_fault(struct pt_regs *regs, long err)
+#else
+static inline int notify_page_fault(struct pt_regs *regs)
 {
-	struct die_args args = {
-		.regs = regs,
-		.str = "page fault",
-		.err = err,
-		.trapnr = 14,
-		.signr = SIGSEGV
-	};
-	return atomic_notifier_call_chain(&notify_page_fault_chain,
-					  DIE_PAGE_FAULT, &args);
+	return 0;
 }
+#endif
 
 /* Sometimes the CPU reports invalid exceptions on prefetch.
    Check that here and ignore.
@@ -345,7 +339,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		if (vmalloc_fault(address) >= 0)
 			return;
 	}
-	if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+	if (notify_page_fault(regs))
 		return;
 	/*
 	 * Don't take the mm semaphore here. If we fixup a prefetch
@@ -354,7 +348,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 		goto bad_area_nosemaphore;
 	}
 
-	if (notify_page_fault(regs, error_code) == NOTIFY_STOP)
+	if (notify_page_fault(regs))
 		return;
 
 	if (likely(regs->eflags & X86_EFLAGS_IF))
......
 #ifndef __ASM_AVR32_KDEBUG_H
 #define __ASM_AVR32_KDEBUG_H
 
-#include <linux/notifier.h>
-
 /* Grossly misnamed. */
 enum die_val {
 	DIE_BREAKPOINT,
 	DIE_SSTEP,
 };
 
-/*
- * These are only here because kprobes.c wants them to implement a
- * blatant layering violation. Will hopefully go away soon once all
- * architectures are updated.
- */
-static inline int register_page_fault_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-static inline int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-
 #endif /* __ASM_AVR32_KDEBUG_H */
@@ -17,8 +17,6 @@ typedef u16	kprobe_opcode_t;
 #define BREAKPOINT_INSTRUCTION	0xd673	/* breakpoint */
 #define MAX_INSN_SIZE		2
 
-#define ARCH_INACTIVE_KPROBE_COUNT	1
-
 #define arch_remove_kprobe(p)	do { } while (0)
 
 /* Architecture specific copy of original instruction */
......
@@ -26,21 +26,6 @@
  * 2005-Oct	Keith Owens <kaos@sgi.com>.  Expand notify_die to cover more
  * events.
  */
-#include <linux/notifier.h>
-
-/*
- * These are only here because kprobes.c wants them to implement a
- * blatant layering violation. Will hopefully go away soon once all
- * architectures are updated.
- */
-static inline int register_page_fault_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-static inline int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
 
 enum die_val {
 	DIE_BREAK = 1,
......
@@ -83,7 +83,6 @@ struct kprobe_ctlblk {
 };
 
 #define ARCH_SUPPORTS_KRETPROBES
-#define ARCH_INACTIVE_KPROBE_COUNT 1
 
 #define SLOT0_OPCODE_SHIFT	(37)
 #define SLOT1_p1_OPCODE_SHIFT	(37 - (64-46))
......
@@ -2,25 +2,6 @@
 #define _ASM_POWERPC_KDEBUG_H
 #ifdef __KERNEL__
 
-/* nearly identical to x86_64/i386 code */
-
-#include <linux/notifier.h>
-
-/*
- * These are only here because kprobes.c wants them to implement a
- * blatant layering violation. Will hopefully go away soon once all
- * architectures are updated.
- */
-static inline int register_page_fault_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-static inline int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-
 extern struct atomic_notifier_head powerpc_die_chain;
 
 /* Grossly misnamed. */
 enum die_val {
 	DIE_OOPS = 1,
......
@@ -81,7 +81,6 @@ typedef unsigned int kprobe_opcode_t;
 #endif
 
 #define ARCH_SUPPORTS_KRETPROBES
-#define ARCH_INACTIVE_KPROBE_COUNT 1
 #define flush_insn_slot(p)	do { } while (0)
 
 void kretprobe_trampoline(void);
......
@@ -4,24 +4,9 @@
 /*
  * Feb 2006 Ported to s390 <grundym@us.ibm.com>
  */
-#include <linux/notifier.h>
 
 struct pt_regs;
 
-/*
- * These are only here because kprobes.c wants them to implement a
- * blatant layering violation. Will hopefully go away soon once all
- * architectures are updated.
- */
-static inline int register_page_fault_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-static inline int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-
 enum die_val {
 	DIE_OOPS = 1,
 	DIE_BPT,
......
@@ -47,7 +47,6 @@ typedef u16 kprobe_opcode_t;
 	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
 
 #define ARCH_SUPPORTS_KRETPROBES
-#define ARCH_INACTIVE_KPROBE_COUNT 0
 #define KPROBE_SWAP_INST	0x10
 
......
 #ifndef __ASM_SH_KDEBUG_H
 #define __ASM_SH_KDEBUG_H
 
-#include <linux/notifier.h>
-
 /* Grossly misnamed. */
 enum die_val {
 	DIE_TRAP,
......
 #ifndef _SPARC64_KDEBUG_H
 #define _SPARC64_KDEBUG_H
 
-/* Nearly identical to x86_64/i386 code. */
-
-#include <linux/notifier.h>
-
 struct pt_regs;
 
-/*
- * These are only here because kprobes.c wants them to implement a
- * blatant layering violation. Will hopefully go away soon once all
- * architectures are updated.
- */
-static inline int register_page_fault_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-static inline int unregister_page_fault_notifier(struct notifier_block *nb)
-{
-	return 0;
-}
-
 extern void bad_trap(struct pt_regs *, long);
 
 /* Grossly misnamed. */
......
@@ -11,7 +11,6 @@ typedef u32 kprobe_opcode_t;
 #define MAX_INSN_SIZE 2
 
 #define arch_remove_kprobe(p)	do {} while (0)
-#define ARCH_INACTIVE_KPROBE_COUNT 0
 
 #define flush_insn_slot(p)		\
 do {	flushi(&(p)->ainsn.insn[0]);	\
......
@@ -5,14 +5,9 @@
  * Aug-05 2004 Ported by Prasanna S Panchamukhi <prasanna@in.ibm.com>
  * from x86_64 architecture.
  */
-#include <linux/notifier.h>
-
 struct pt_regs;
 
-extern int register_page_fault_notifier(struct notifier_block *);
-extern int unregister_page_fault_notifier(struct notifier_block *);
-
 /* Grossly misnamed. */
 enum die_val {
 	DIE_OOPS = 1,
@@ -27,7 +22,6 @@ enum die_val {
 	DIE_GPF,
 	DIE_CALL,
 	DIE_NMI_IPI,
-	DIE_PAGE_FAULT,
 };
 
 #endif
 #ifndef _X86_64_KDEBUG_H
 #define _X86_64_KDEBUG_H 1
 
-#include <linux/notifier.h>
+#include <linux/compiler.h>
 
 struct pt_regs;
 
-extern int register_page_fault_notifier(struct notifier_block *);
-extern int unregister_page_fault_notifier(struct notifier_block *);
-
 /* Grossly misnamed. */
 enum die_val {
 	DIE_OOPS = 1,
@@ -22,7 +19,6 @@ enum die_val {
 	DIE_GPF,
 	DIE_CALL,
 	DIE_NMI_IPI,
-	DIE_PAGE_FAULT,
 };
 
 extern void printk_address(unsigned long address);
......
@@ -43,7 +43,6 @@ typedef u8 kprobe_opcode_t;
 	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
 
 #define ARCH_SUPPORTS_KRETPROBES
-#define ARCH_INACTIVE_KPROBE_COUNT 0
 #define flush_insn_slot(p)	do { } while (0)
 
 void arch_remove_kprobe(struct kprobe *p);
@@ -89,4 +88,5 @@ static inline void restore_interrupts(struct pt_regs *regs)
 
 extern int kprobe_exceptions_notify(struct notifier_block *self,
 				    unsigned long val, void *data);
+extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
 #endif /* _ASM_KPROBES_H */
@@ -42,7 +42,6 @@ typedef u8 kprobe_opcode_t;
 	: (((unsigned long)current_thread_info()) + THREAD_SIZE - (ADDR)))
 
 #define ARCH_SUPPORTS_KRETPROBES
-#define ARCH_INACTIVE_KPROBE_COUNT 1
 
 void kretprobe_trampoline(void);
 extern void arch_remove_kprobe(struct kprobe *p);
......
@@ -64,7 +64,6 @@
 
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
 static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
-static atomic_t kprobe_count;
 
 /* NOTE: change this value only with kprobe_mutex held */
 static bool kprobe_enabled;
@@ -73,11 +72,6 @@ DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
 DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
 static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
 
-static struct notifier_block kprobe_page_fault_nb = {
-	.notifier_call = kprobe_exceptions_notify,
-	.priority = 0x7fffffff /* we need to be notified first */
-};
-
 #ifdef __ARCH_WANT_KPROBES_INSN_SLOT
 /*
  * kprobe->ainsn.insn points to the copy of the instruction to be
@@ -556,8 +550,6 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
-		if (!ret)
-			atomic_inc(&kprobe_count);
 		goto out;
 	}
 
@@ -569,13 +561,9 @@ static int __kprobes __register_kprobe(struct kprobe *p,
 	hlist_add_head_rcu(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	if (kprobe_enabled) {
-		if (atomic_add_return(1, &kprobe_count) == \
-			(ARCH_INACTIVE_KPROBE_COUNT + 1))
-			register_page_fault_notifier(&kprobe_page_fault_nb);
-
+	if (kprobe_enabled)
 		arch_arm_kprobe(p);
-	}
+
 out:
 	mutex_unlock(&kprobe_mutex);
@@ -658,16 +646,6 @@ void __kprobes unregister_kprobe(struct kprobe *p)
 		}
 		mutex_unlock(&kprobe_mutex);
 	}
-
-	/* Call unregister_page_fault_notifier()
-	 * if no probes are active
-	 */
-	mutex_lock(&kprobe_mutex);
-	if (atomic_add_return(-1, &kprobe_count) == \
-			ARCH_INACTIVE_KPROBE_COUNT)
-		unregister_page_fault_notifier(&kprobe_page_fault_nb);
-	mutex_unlock(&kprobe_mutex);
-	return;
 }
 
 static struct notifier_block kprobe_exceptions_nb = {
@@ -815,7 +793,6 @@ static int __init init_kprobes(void)
 		INIT_HLIST_HEAD(&kprobe_table[i]);
 		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
 	}
-	atomic_set(&kprobe_count, 0);
 
 	/* By default, kprobes are enabled */
 	kprobe_enabled = true;
@@ -921,13 +898,6 @@ static void __kprobes enable_all_kprobes(void)
 	if (kprobe_enabled)
 		goto already_enabled;
 
-	/*
-	 * Re-register the page fault notifier only if there are any
-	 * active probes at the time of enabling kprobes globally
-	 */
-	if (atomic_read(&kprobe_count) > ARCH_INACTIVE_KPROBE_COUNT)
-		register_page_fault_notifier(&kprobe_page_fault_nb);
-
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
@@ -968,10 +938,7 @@ static void __kprobes disable_all_kprobes(void)
 	mutex_unlock(&kprobe_mutex);
 	/* Allow all currently running kprobes to complete */
 	synchronize_sched();
-	mutex_lock(&kprobe_mutex);
-	/* Unconditionally unregister the page_fault notifier */
-	unregister_page_fault_notifier(&kprobe_page_fault_nb);
 	return;
 
 already_disabled:
 	mutex_unlock(&kprobe_mutex);
......
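
[Editor's note: nothing in the module-facing kprobes API changes here; a probe's fault_handler callback still fires when the probed code faults, it is simply reached through the direct call above instead of the notifier chain. A minimal module-style sketch against the kprobes API of this era; the probed symbol do_fork is a hypothetical example target:

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/kprobes.h>

    /* Runs just before the probed instruction executes. */
    static int my_pre(struct kprobe *p, struct pt_regs *regs)
    {
            return 0;       /* continue normal execution */
    }

    /* Runs if the probed code faults; returning 0 hands the fault back
     * to the regular page fault path (now reached via the direct call). */
    static int my_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
    {
            printk(KERN_INFO "kprobe at %p: trap %d\n", p->addr, trapnr);
            return 0;
    }

    static struct kprobe kp = {
            .symbol_name    = "do_fork",    /* hypothetical target */
            .pre_handler    = my_pre,
            .fault_handler  = my_fault,
    };

    static int __init kp_init(void)
    {
            return register_kprobe(&kp);
    }

    static void __exit kp_exit(void)
    {
            unregister_kprobe(&kp);
    }

    module_init(kp_init);
    module_exit(kp_exit);
    MODULE_LICENSE("GPL");
]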