Commit 441f1eb7 authored by Oleg Nesterov

uprobes: Kill uprobe_events, use RB_EMPTY_ROOT() instead

uprobe_events counts the number of uprobes in uprobes_tree but
it is used as a boolean. We can use RB_EMPTY_ROOT() instead.

The no_uprobe_events() helper added by this patch can probably
gain more callers, e.g. mmf_recalc_uprobes().
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Acked-by: Anton Arapov <anton@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Parent d4d3ccc6
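
The change rests on a simple observation: the counter was only ever compared against zero, so asking the rbtree itself whether it is empty answers the same question without a second piece of state to keep in sync with inserts and erases. The sketch below is illustrative only and not part of the patch; the demo_* names are hypothetical, while RB_ROOT and RB_EMPTY_ROOT() are the real helpers from <linux/rbtree.h>.

/*
 * Illustrative sketch (not from the patch): a tree-emptiness check can
 * stand in for a counter that is only ever tested against zero.
 */
#include <linux/rbtree.h>

static struct rb_root demo_tree = RB_ROOT;

/*
 * Instead of atomic_inc()/atomic_dec() on a separate counter at every
 * insert/erase, ask the rbtree whether it currently has any nodes.
 */
#define demo_no_events()	RB_EMPTY_ROOT(&demo_tree)

static int demo_fast_path(void)
{
	if (demo_no_events())
		return 0;	/* nothing registered, skip the expensive work */
	/* ... walk demo_tree ... */
	return 1;
}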
@@ -41,6 +41,11 @@
 #define MAX_UPROBE_XOL_SLOTS		UINSNS_PER_PAGE
 static struct rb_root uprobes_tree = RB_ROOT;
+/*
+ * allows us to skip the uprobe_mmap if there are no uprobe events active
+ * at this time. Probably a fine grained per inode count is better?
+ */
+#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)
 static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

@@ -74,13 +79,6 @@ static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
 static struct percpu_rw_semaphore dup_mmap_sem;
-/*
- * uprobe_events allows us to skip the uprobe_mmap if there are no uprobe
- * events active at this time. Probably a fine grained per inode count is
- * better?
- */
-static atomic_t uprobe_events = ATOMIC_INIT(0);
 /* Have a copy of original instruction */
 #define UPROBE_COPY_INSN	0
 /* Can skip singlestep */

@@ -460,8 +458,6 @@ static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
 		kfree(uprobe);
 		uprobe = cur_uprobe;
 		iput(inode);
-	} else {
-		atomic_inc(&uprobe_events);
 	}
 	return uprobe;

@@ -685,7 +681,6 @@ static void delete_uprobe(struct uprobe *uprobe)
 	spin_unlock(&uprobes_treelock);
 	iput(uprobe->inode);
 	put_uprobe(uprobe);
-	atomic_dec(&uprobe_events);
 }
 struct map_info {

@@ -975,7 +970,7 @@ int uprobe_mmap(struct vm_area_struct *vma)
 	struct uprobe *uprobe, *u;
 	struct inode *inode;
-	if (!atomic_read(&uprobe_events) || !valid_vma(vma, true))
+	if (no_uprobe_events() || !valid_vma(vma, true))
 		return 0;
 	inode = vma->vm_file->f_mapping->host;

@@ -1021,7 +1016,7 @@ vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long e
  */
 void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
-	if (!atomic_read(&uprobe_events) || !valid_vma(vma, false))
+	if (no_uprobe_events() || !valid_vma(vma, false))
 		return;
 	if (!atomic_read(&vma->vm_mm->mm_users))	/* called by mmput() ? */
...