Commit ded49c55, authored by Anton Arapov, committed by Oleg Nesterov

uretprobes: Limit the depth of return probe nestedness

Unlike kretprobes, we can't trust user space and therefore need
protection from user-space attacks. User space has an "unlimited"
stack, so this patch limits the nesting of return probes as a
simple remedy.

Note that this implementation leaks return_instance objects on
siglongjmp() and only reclaims them at exit()/exec().
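
The leak occurs because a function that is unwound by siglongjmp() never
reaches the return trampoline, so the return_instance pushed on entry is
never popped. A minimal, hypothetical user-space sketch of such a
non-returning probed function (again, not part of the patch):

#include <setjmp.h>
#include <stdio.h>

static sigjmp_buf env;

/* If a uretprobe were placed on probed(), its return_instance would be
 * pushed on entry but never popped: siglongjmp() bypasses the normal
 * return path, so the trampoline is never hit for this frame. */
static void probed(void)
{
	siglongjmp(env, 1);
}

int main(void)
{
	if (sigsetjmp(env, 1) == 0)
		probed();
	printf("back in main; probed() never returned normally\n");
	return 0;
}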

The intention is to keep the initial implementation minimal (KISS)
in order not to complicate the uretprobes code.

In the future we may come up with a more sophisticated solution that
removes this depth limitation. That is not an easy task and lies
beyond this patchset.
Signed-off-by: Anton Arapov <anton@redhat.com>
Acked-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Parent fec8898d
@@ -38,6 +38,8 @@ struct inode;
 #define UPROBE_HANDLER_REMOVE		1
 #define UPROBE_HANDLER_MASK		1
 
+#define MAX_URETPROBE_DEPTH		64
+
 enum uprobe_filter_ctx {
 	UPROBE_FILTER_REGISTER,
 	UPROBE_FILTER_UNREGISTER,
@@ -72,6 +74,7 @@ struct uprobe_task {
 	struct arch_uprobe_task		autask;
 
 	struct return_instance		*return_instances;
+	unsigned int			depth;
 	struct uprobe			*active_uprobe;
 
 	unsigned long			xol_vaddr;
@@ -1404,6 +1404,13 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
 	if (!utask)
 		return;
 
+	if (utask->depth >= MAX_URETPROBE_DEPTH) {
+		printk_ratelimited(KERN_INFO "uprobe: omit uretprobe due to"
+				" nestedness limit pid/tgid=%d/%d\n",
+				current->pid, current->tgid);
+		return;
+	}
+
 	ri = kzalloc(sizeof(struct return_instance), GFP_KERNEL);
 	if (!ri)
 		goto fail;
@@ -1439,6 +1446,8 @@ static void prepare_uretprobe(struct uprobe *uprobe, struct pt_regs *regs)
 	ri->orig_ret_vaddr = orig_ret_vaddr;
 	ri->chained = chained;
 
+	utask->depth++;
+
 	/* add instance to the stack */
 	ri->next = utask->return_instances;
 	utask->return_instances = ri;
@@ -1681,6 +1690,8 @@ static bool handle_trampoline(struct pt_regs *regs)
 		if (!chained)
 			break;
 
+		utask->depth--;
+
 		BUG_ON(!ri);
 	}