Commit 8bc76772 authored by Rusty Lynch, committed by Linus Torvalds

[PATCH] Kprobes ia64 cleanup

A cleanup of the ia64 kprobes implementation such that all of the bundle
manipulation logic is concentrated in arch_prepare_kprobe().

With the current design for kprobes, the arch specific code only has a
chance to return failure inside the arch_prepare_kprobe() function.

This patch moves all of the work that was happening in arch_copy_kprobe()
and most of the work that was happening in arch_arm_kprobe() into
arch_prepare_kprobe().  By doing this we can add further robustness checks
in arch_prepare_kprobe() and refuse to insert kprobes that would cause problems.
Signed-off-by: Rusty Lynch <Rusty.lynch@intel.com>
Signed-off-by: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Parent cd2675bf
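The message above hinges on the fact that the generic registration path only consults the return value of this one arch hook; the other arch callbacks are void. A minimal sketch of that flow (simplified, not the kernel's verbatim register_kprobe()):

        /*
         * Simplified sketch of the generic registration flow this commit
         * relies on; the real register_kprobe() also does locking and
         * hash-table insertion on top of this.
         */
        int register_kprobe_sketch(struct kprobe *p)
        {
                int ret;

                ret = arch_prepare_kprobe(p);   /* the only hook that can refuse */
                if (ret)
                        return ret;             /* e.g. -EINVAL for a bad address */

                arch_copy_kprobe(p);            /* void: must not fail */
                arch_arm_kprobe(p);             /* void: must not fail */
                return 0;
        }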
@@ -84,121 +84,97 @@ static enum instruction_type bundle_encoding[32][3] = {
 int arch_prepare_kprobe(struct kprobe *p)
 {
         unsigned long addr = (unsigned long) p->addr;
-        unsigned long bundle_addr = addr & ~0xFULL;
+        unsigned long *bundle_addr = (unsigned long *)(addr & ~0xFULL);
         unsigned long slot = addr & 0xf;
-        bundle_t bundle;
         unsigned long template;
+        unsigned long major_opcode = 0;
+        unsigned long lx_type_inst = 0;
+        unsigned long kprobe_inst = 0;
+        bundle_t *bundle = &p->ainsn.insn.bundle;

-        /*
-         * TODO: Verify that a probe is not being inserted
-         *       in sensitive regions of code
-         * TODO: Verify that the memory holding the probe is rwx
-         * TODO: verify this is a kernel address
-         */
-        memcpy(&bundle, (unsigned long *)bundle_addr, sizeof(bundle_t));
+        memcpy(&p->opcode.bundle, bundle_addr, sizeof(bundle_t));
+        memcpy(&p->ainsn.insn.bundle, bundle_addr, sizeof(bundle_t));

-        template = bundle.quad0.template;
-        if (((bundle_encoding[template][1] == L) && slot > 1) || (slot > 2)) {
-                printk(KERN_WARNING "Attempting to insert unaligned kprobe at 0x%lx\n", addr);
-                return -EINVAL;
-        }
-        return 0;
-}
+        p->ainsn.inst_flag = 0;
+        p->ainsn.target_br_reg = 0;

-void arch_copy_kprobe(struct kprobe *p)
-{
-        unsigned long addr = (unsigned long)p->addr;
-        unsigned long bundle_addr = addr & ~0xFULL;
+        template = bundle->quad0.template;

-        memcpy(&p->ainsn.insn.bundle, (unsigned long *)bundle_addr,
-                        sizeof(bundle_t));
-        memcpy(&p->opcode.bundle, &p->ainsn.insn.bundle, sizeof(bundle_t));
-}
-
-void arch_arm_kprobe(struct kprobe *p)
-{
-        unsigned long addr = (unsigned long)p->addr;
-        unsigned long arm_addr = addr & ~0xFULL;
-        unsigned long slot = addr & 0xf;
-        unsigned long template;
-        unsigned long major_opcode = 0;
-        unsigned long lx_type_inst = 0;
-        unsigned long kprobe_inst = 0;
-        bundle_t bundle;
-
-        p->ainsn.inst_flag = 0;
-        p->ainsn.target_br_reg = 0;
-
-        memcpy(&bundle, &p->ainsn.insn.bundle, sizeof(bundle_t));
-        template = bundle.quad0.template;
+        if (((bundle_encoding[template][1] == L) && slot > 1) || (slot > 2)) {
+                printk(KERN_WARNING "Attempting to insert unaligned kprobe at 0x%lx\n",
+                                addr);
+                return -EINVAL;
+        }
+
         if (slot == 1 && bundle_encoding[template][1] == L) {
                 lx_type_inst = 1;
                 slot = 2;
         }
+
         switch (slot) {
         case 0:
-                major_opcode = (bundle.quad0.slot0 >> SLOT0_OPCODE_SHIFT);
-                kprobe_inst = bundle.quad0.slot0;
-                bundle.quad0.slot0 = BREAK_INST;
+                major_opcode = (bundle->quad0.slot0 >> SLOT0_OPCODE_SHIFT);
+                kprobe_inst = bundle->quad0.slot0;
+                bundle->quad0.slot0 = BREAK_INST;
                 break;
         case 1:
-                major_opcode = (bundle.quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
-                kprobe_inst = (bundle.quad0.slot1_p0 |
-                                (bundle.quad1.slot1_p1 << (64-46)));
-                bundle.quad0.slot1_p0 = BREAK_INST;
-                bundle.quad1.slot1_p1 = (BREAK_INST >> (64-46));
+                major_opcode = (bundle->quad1.slot1_p1 >> SLOT1_p1_OPCODE_SHIFT);
+                kprobe_inst = (bundle->quad0.slot1_p0 |
+                                (bundle->quad1.slot1_p1 << (64-46)));
+                bundle->quad0.slot1_p0 = BREAK_INST;
+                bundle->quad1.slot1_p1 = (BREAK_INST >> (64-46));
                 break;
         case 2:
-                major_opcode = (bundle.quad1.slot2 >> SLOT2_OPCODE_SHIFT);
-                kprobe_inst = bundle.quad1.slot2;
-                bundle.quad1.slot2 = BREAK_INST;
+                major_opcode = (bundle->quad1.slot2 >> SLOT2_OPCODE_SHIFT);
+                kprobe_inst = bundle->quad1.slot2;
+                bundle->quad1.slot2 = BREAK_INST;
                 break;
         }
+
         /*
          * Look for IP relative Branches, IP relative call or
          * IP relative predicate instructions
          */
         if (bundle_encoding[template][slot] == B) {
                 switch (major_opcode) {
                 case INDIRECT_CALL_OPCODE:
                         p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
                         p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
                         break;
                 case IP_RELATIVE_PREDICT_OPCODE:
                 case IP_RELATIVE_BRANCH_OPCODE:
                         p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
                         break;
                 case IP_RELATIVE_CALL_OPCODE:
                         p->ainsn.inst_flag |= INST_FLAG_FIX_RELATIVE_IP_ADDR;
                         p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
                         p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
                         break;
                 default:
                         /* Do nothing */
                         break;
                 }
         } else if (lx_type_inst) {
                 switch (major_opcode) {
                 case LONG_CALL_OPCODE:
                         p->ainsn.inst_flag |= INST_FLAG_FIX_BRANCH_REG;
                         p->ainsn.target_br_reg = ((kprobe_inst >> 6) & 0x7);
                         break;
                 default:
                         /* Do nothing */
                         break;
                 }
         }

-        /* Flush icache for the instruction at the emulated address */
-        flush_icache_range((unsigned long)&p->ainsn.insn.bundle,
-                        (unsigned long)&p->ainsn.insn.bundle +
-                        sizeof(bundle_t));
-        /*
-         * Patch the original instruction with the probe instruction
-         * and flush the instruction cache
-         */
-        memcpy((char *) arm_addr, (char *) &bundle, sizeof(bundle_t));
+        return 0;
+}
+
+void arch_arm_kprobe(struct kprobe *p)
+{
+        unsigned long addr = (unsigned long)p->addr;
+        unsigned long arm_addr = addr & ~0xFULL;
+
+        memcpy((char *)arm_addr, &p->ainsn.insn.bundle, sizeof(bundle_t));
         flush_icache_range(arm_addr, arm_addr + sizeof(bundle_t));
 }
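The slot arithmetic in the hunk above follows from the IA-64 bundle layout: a 128-bit bundle holds a 5-bit template and three 41-bit instruction slots, and slot 1 straddles the two 64-bit words (quad0/quad1). The following stand-alone illustration (not part of the patch) shows where the (64-46) shift comes from:

        #include <stdint.h>

        /*
         * Illustration only: reassemble slot 1 of an IA-64 bundle from its
         * two 64-bit words.  The 128-bit bundle holds a 5-bit template
         * (bits 0-4) and three 41-bit slots: slot 0 in bits 5-45, slot 1 in
         * bits 46-86, slot 2 in bits 87-127.  Slot 1 therefore has its low
         * 18 bits in quad0 and its high 23 bits in quad1, which is exactly
         * the slot1_p0 | (slot1_p1 << (64-46)) stitching used above.
         */
        static uint64_t bundle_slot1(uint64_t quad0, uint64_t quad1)
        {
                uint64_t lo = quad0 >> 46;                /* 18 low bits of slot 1  */
                uint64_t hi = quad1 & ((1ULL << 23) - 1); /* 23 high bits of slot 1 */

                return lo | (hi << (64 - 46));            /* full 41-bit instruction */
        }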
@@ -226,7 +202,7 @@ void arch_remove_kprobe(struct kprobe *p)
  */
 static void resume_execution(struct kprobe *p, struct pt_regs *regs)
 {
-        unsigned long bundle_addr = ((unsigned long) (&p->ainsn.insn.bundle)) & ~0xFULL;
+        unsigned long bundle_addr = ((unsigned long) (&p->opcode.bundle)) & ~0xFULL;
         unsigned long resume_addr = (unsigned long)p->addr & ~0xFULL;
         unsigned long template;
         int slot = ((unsigned long)p->addr & 0xf);
@@ -293,7 +269,7 @@ static void resume_execution(struct kprobe *p, struct pt_regs *regs)
 static void prepare_ss(struct kprobe *p, struct pt_regs *regs)
 {
-        unsigned long bundle_addr = (unsigned long) &p->ainsn.insn.bundle;
+        unsigned long bundle_addr = (unsigned long) &p->opcode.bundle;
         unsigned long slot = (unsigned long)p->addr & 0xf;

         /* Update instruction pointer (IIP) and slot number (IPSR.ri) */
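These two one-line hunks are the flip side of the copy swap in arch_prepare_kprobe(): the break-patched bundle now lives in p->ainsn.insn.bundle (and is what arch_arm_kprobe() writes over the kernel text), so the out-of-line single-step path has to run from the pristine copy instead. A summary of the field usage after this patch, as the hunks imply:

        /*
         * Field usage after this patch (summary, not code from the diff):
         *
         *   p->opcode.bundle     - unmodified copy of the original bundle;
         *                          prepare_ss()/resume_execution() single-step
         *                          out of this copy
         *   p->ainsn.insn.bundle - copy whose probed slot was overwritten with
         *                          BREAK_INST; arch_arm_kprobe() patches this
         *                          over the probed address
         */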
@@ -30,6 +30,8 @@
 #define BREAK_INST      (long)(__IA64_BREAK_KPROBE << 6)

+struct kprobe;
+
 typedef struct _bundle {
         struct {
                 unsigned long long template : 5;
@@ -79,6 +81,11 @@ static inline void jprobe_return(void)
 {
 }

+/* ia64 does not need this */
+static inline void arch_copy_kprobe(struct kprobe *p)
+{
+}
+
 #ifdef CONFIG_KPROBES
 extern int kprobe_exceptions_notify(struct notifier_block *self,
                                 unsigned long val, void *data);
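The struct kprobe; forward declaration added at the top of the header is what lets the new arch_copy_kprobe() stub take a struct kprobe * before the full structure definition is in scope: an incomplete type is sufficient for a pointer parameter. A tiny stand-alone example of the same pattern (widget and poke are made-up names):

        struct widget;                             /* incomplete (forward) declaration */

        static inline void poke(struct widget *w)  /* a pointer needs no full type */
        {
                (void)w;                           /* nothing is dereferenced here */
        }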