author | Anton Altaparmakov <aia21@cantab.net> | 2005-06-25 14:27:27 +0100
---|---|---
committer | Anton Altaparmakov <aia21@cantab.net> | 2005-06-25 14:27:27 +0100
commit | 38b22b6e9f46ab8f73ef5734f0e0a000766a9258 |
tree | 2ccc41ef55918d3af43e444bde7648562a031559 /kernel/kprobes.c |
parent | 3357d4c75f1fb67e7304998c4ad4e9a9fed66fa4 |
parent | b3e112bcc19abd8e9657dca34a87316786e096f3 |
Automerge with /usr/src/ntfs-2.6.git.
Diffstat (limited to 'kernel/kprobes.c')

-rw-r--r-- | kernel/kprobes.c | 288

1 file changed, 266 insertions(+), 22 deletions(-)
```diff
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 037142b72a4..334f37472c5 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -27,6 +27,9 @@
  *		interface to access function arguments.
  * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
  *		exceptions notifier to be first on the priority list.
+ * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
+ *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
+ *		<prasanna@in.ibm.com> added function-return probes.
  */
 #include <linux/kprobes.h>
 #include <linux/spinlock.h>
@@ -41,6 +44,7 @@
 #define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
 
 static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
+static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
 
 unsigned int kprobe_cpu = NR_CPUS;
 static DEFINE_SPINLOCK(kprobe_lock);
@@ -78,22 +82,23 @@ struct kprobe *get_kprobe(void *addr)
  * Aggregate handlers for multiple kprobes support - these handlers
  * take care of invoking the individual kprobe handlers on p->list
  */
-int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
+static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
 	struct kprobe *kp;
 
 	list_for_each_entry(kp, &p->list, list) {
 		if (kp->pre_handler) {
 			curr_kprobe = kp;
-			kp->pre_handler(kp, regs);
-			curr_kprobe = NULL;
+			if (kp->pre_handler(kp, regs))
+				return 1;
 		}
+		curr_kprobe = NULL;
 	}
 	return 0;
 }
 
-void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
-		       unsigned long flags)
+static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
+			      unsigned long flags)
 {
 	struct kprobe *kp;
 
@@ -107,7 +112,8 @@ void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
 	return;
 }
 
-int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
+static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
+			      int trapnr)
 {
 	/*
 	 * if we faulted "during" the execution of a user specified
@@ -120,19 +126,191 @@ int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs, int trapnr)
 	return 0;
 }
 
+static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kprobe *kp = curr_kprobe;
+	if (curr_kprobe && kp->break_handler) {
+		if (kp->break_handler(kp, regs)) {
+			curr_kprobe = NULL;
+			return 1;
+		}
+	}
+	curr_kprobe = NULL;
+	return 0;
+}
+
+struct kprobe trampoline_p = {
+	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
+	.pre_handler = trampoline_probe_handler,
+	.post_handler = trampoline_post_handler
+};
+
+struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
+{
+	struct hlist_node *node;
+	struct kretprobe_instance *ri;
+	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
+		return ri;
+	return NULL;
+}
+
+static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
+{
+	struct hlist_node *node;
+	struct kretprobe_instance *ri;
+	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
+		return ri;
+	return NULL;
+}
+
+struct kretprobe_instance *get_rp_inst(void *sara)
+{
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct task_struct *tsk;
+	struct kretprobe_instance *ri;
+
+	tsk = arch_get_kprobe_task(sara);
+	head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
+	hlist_for_each_entry(ri, node, head, hlist) {
+		if (ri->stack_addr == sara)
+			return ri;
+	}
+	return NULL;
+}
+
+void add_rp_inst(struct kretprobe_instance *ri)
+{
+	struct task_struct *tsk;
+	/*
+	 * Remove rp inst off the free list -
+	 * Add it back when probed function returns
+	 */
+	hlist_del(&ri->uflist);
+	tsk = arch_get_kprobe_task(ri->stack_addr);
+	/* Add rp inst onto table */
+	INIT_HLIST_NODE(&ri->hlist);
+	hlist_add_head(&ri->hlist,
+		       &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]);
+
+	/* Also add this rp inst to the used list. */
+	INIT_HLIST_NODE(&ri->uflist);
+	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
+}
+
+void recycle_rp_inst(struct kretprobe_instance *ri)
+{
+	/* remove rp inst off the rprobe_inst_table */
+	hlist_del(&ri->hlist);
+	if (ri->rp) {
+		/* remove rp inst off the used list */
+		hlist_del(&ri->uflist);
+		/* put rp inst back onto the free list */
+		INIT_HLIST_NODE(&ri->uflist);
+		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
+	} else
+		/* Unregistering */
+		kfree(ri);
+}
+
+struct hlist_head *kretprobe_inst_table_head(struct task_struct *tsk)
+{
+	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
+}
+
+struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk)
+{
+	struct task_struct *tsk;
+	struct hlist_head *head;
+	struct hlist_node *node;
+	struct kretprobe_instance *ri;
+
+	head = &kretprobe_inst_table[hash_ptr(tk, KPROBE_HASH_BITS)];
+
+	hlist_for_each_entry(ri, node, head, hlist) {
+		tsk = arch_get_kprobe_task(ri->stack_addr);
+		if (tsk == tk)
+			return ri;
+	}
+	return NULL;
+}
+
+/*
+ * This function is called from do_exit or do_execv when task tk's stack is
+ * about to be recycled. Recycle any function-return probe instances
+ * associated with this task. These represent probed functions that have
+ * been called but may never return.
+ */
+void kprobe_flush_task(struct task_struct *tk)
+{
+	unsigned long flags = 0;
+	spin_lock_irqsave(&kprobe_lock, flags);
+	arch_kprobe_flush_task(tk);
+	spin_unlock_irqrestore(&kprobe_lock, flags);
+}
+
+/*
+ * This kprobe pre_handler is registered with every kretprobe. When probe
+ * hits it will set up the return probe.
+ */
+static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
+{
+	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
+
+	/*TODO: consider to only swap the RA after the last pre_handler fired */
+	arch_prepare_kretprobe(rp, regs);
+	return 0;
+}
+
+static inline void free_rp_inst(struct kretprobe *rp)
+{
+	struct kretprobe_instance *ri;
+	while ((ri = get_free_rp_inst(rp)) != NULL) {
+		hlist_del(&ri->uflist);
+		kfree(ri);
+	}
+}
+
+/*
+ * Keep all fields in the kprobe consistent
+ */
+static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
+{
+	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
+	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
+}
+
+/*
+ * Add the new probe to old_p->list. Fail if this is the
+ * second jprobe at the address - two jprobes can't coexist
+ */
+static int add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
+{
+	struct kprobe *kp;
+
+	if (p->break_handler) {
+		list_for_each_entry(kp, &old_p->list, list) {
+			if (kp->break_handler)
+				return -EEXIST;
+		}
+		list_add_tail(&p->list, &old_p->list);
+	} else
+		list_add(&p->list, &old_p->list);
+	return 0;
+}
+
 /*
  * Fill in the required fields of the "manager kprobe". Replace the
  * earlier kprobe in the hlist with the manager kprobe
  */
 static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
 {
+	copy_kprobe(p, ap);
 	ap->addr = p->addr;
-	ap->opcode = p->opcode;
-	memcpy(&ap->ainsn, &p->ainsn, sizeof(struct arch_specific_insn));
-
 	ap->pre_handler = aggr_pre_handler;
 	ap->post_handler = aggr_post_handler;
 	ap->fault_handler = aggr_fault_handler;
+	ap->break_handler = aggr_break_handler;
 
 	INIT_LIST_HEAD(&ap->list);
 	list_add(&p->list, &ap->list);
@@ -153,16 +331,16 @@ static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
 	int ret = 0;
 	struct kprobe *ap;
 
-	if (old_p->break_handler || p->break_handler) {
-		ret = -EEXIST;	/* kprobe and jprobe can't (yet) coexist */
-	} else if (old_p->pre_handler == aggr_pre_handler) {
-		list_add(&p->list, &old_p->list);
+	if (old_p->pre_handler == aggr_pre_handler) {
+		copy_kprobe(old_p, p);
+		ret = add_new_kprobe(old_p, p);
 	} else {
 		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
 		if (!ap)
 			return -ENOMEM;
 		add_aggr_kprobe(ap, old_p);
-		list_add(&p->list, &ap->list);
+		copy_kprobe(ap, p);
+		ret = add_new_kprobe(ap, p);
 	}
 	return ret;
 }
@@ -170,10 +348,8 @@ static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
 /* kprobe removal house-keeping routines */
 static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
 {
-	*p->addr = p->opcode;
+	arch_disarm_kprobe(p);
 	hlist_del(&p->hlist);
-	flush_icache_range((unsigned long) p->addr,
-			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
 	spin_unlock_irqrestore(&kprobe_lock, flags);
 	arch_remove_kprobe(p);
 }
@@ -200,6 +376,7 @@ int register_kprobe(struct kprobe *p)
 	}
 	spin_lock_irqsave(&kprobe_lock, flags);
 	old_p = get_kprobe(p->addr);
+	p->nmissed = 0;
 	if (old_p) {
 		ret = register_aggr_kprobe(old_p, p);
 		goto out;
@@ -210,10 +387,8 @@ int register_kprobe(struct kprobe *p)
 	hlist_add_head(&p->hlist,
 		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
 
-	p->opcode = *p->addr;
-	*p->addr = BREAKPOINT_INSTRUCTION;
-	flush_icache_range((unsigned long) p->addr,
-			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
+	arch_arm_kprobe(p);
+
 out:
 	spin_unlock_irqrestore(&kprobe_lock, flags);
 rm_kprobe:
@@ -257,16 +432,82 @@ void unregister_jprobe(struct jprobe *jp)
 	unregister_kprobe(&jp->kp);
 }
 
+#ifdef ARCH_SUPPORTS_KRETPROBES
+
+int register_kretprobe(struct kretprobe *rp)
+{
+	int ret = 0;
+	struct kretprobe_instance *inst;
+	int i;
+
+	rp->kp.pre_handler = pre_handler_kretprobe;
+
+	/* Pre-allocate memory for max kretprobe instances */
+	if (rp->maxactive <= 0) {
+#ifdef CONFIG_PREEMPT
+		rp->maxactive = max(10, 2 * NR_CPUS);
+#else
+		rp->maxactive = NR_CPUS;
+#endif
+	}
+	INIT_HLIST_HEAD(&rp->used_instances);
+	INIT_HLIST_HEAD(&rp->free_instances);
+	for (i = 0; i < rp->maxactive; i++) {
+		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
+		if (inst == NULL) {
+			free_rp_inst(rp);
+			return -ENOMEM;
+		}
+		INIT_HLIST_NODE(&inst->uflist);
+		hlist_add_head(&inst->uflist, &rp->free_instances);
+	}
+
+	rp->nmissed = 0;
+	/* Establish function entry probe point */
+	if ((ret = register_kprobe(&rp->kp)) != 0)
+		free_rp_inst(rp);
+	return ret;
+}
+
+#else /* ARCH_SUPPORTS_KRETPROBES */
+
+int register_kretprobe(struct kretprobe *rp)
+{
+	return -ENOSYS;
+}
+
+#endif /* ARCH_SUPPORTS_KRETPROBES */
+
+void unregister_kretprobe(struct kretprobe *rp)
+{
+	unsigned long flags;
+	struct kretprobe_instance *ri;
+
+	unregister_kprobe(&rp->kp);
+	/* No race here */
+	spin_lock_irqsave(&kprobe_lock, flags);
+	free_rp_inst(rp);
+	while ((ri = get_used_rp_inst(rp)) != NULL) {
+		ri->rp = NULL;
+		hlist_del(&ri->uflist);
+	}
+	spin_unlock_irqrestore(&kprobe_lock, flags);
+}
+
 static int __init init_kprobes(void)
 {
 	int i, err = 0;
 
 	/* FIXME allocate the probe table, currently defined statically */
 	/* initialize all list heads */
-	for (i = 0; i < KPROBE_TABLE_SIZE; i++)
+	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		INIT_HLIST_HEAD(&kprobe_table[i]);
+		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
+	}
 
 	err = register_die_notifier(&kprobe_exceptions_nb);
+	/* Register the trampoline probe for return probe */
+	register_kprobe(&trampoline_p);
 	return err;
 }
@@ -277,3 +518,6 @@ EXPORT_SYMBOL_GPL(unregister_kprobe);
 EXPORT_SYMBOL_GPL(register_jprobe);
 EXPORT_SYMBOL_GPL(unregister_jprobe);
 EXPORT_SYMBOL_GPL(jprobe_return);
+EXPORT_SYMBOL_GPL(register_kretprobe);
+EXPORT_SYMBOL_GPL(unregister_kretprobe);
+
```
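For readers wanting to exercise the newly exported interface, here is a minimal module sketch. It is illustrative only and not part of this commit: the `.handler`, `.maxactive`, and `.nmissed` fields follow the `struct kretprobe` definition this patch series introduces in `include/linux/kprobes.h` (not shown in the hunk above), and `probed_func` is a hypothetical local target that exists only to give the return probe something to fire on.

```c
/*
 * Illustrative sketch (not part of this commit): registering a
 * function-return probe with the API added by this patch. Assumes
 * the companion <linux/kprobes.h> change, where struct kretprobe
 * embeds a struct kprobe plus a .handler callback that runs when
 * the probed function returns via kretprobe_trampoline.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Deliberately non-inlined local function used as a hypothetical target. */
static noinline int probed_func(int x)
{
	return x * 2;
}

/* Invoked on return from probed_func(), after the trampoline fires. */
static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	printk(KERN_INFO "probed_func returned\n");
	return 0;
}

static struct kretprobe my_rp = {
	.kp.addr	= (kprobe_opcode_t *) probed_func,
	.handler	= ret_handler,
	.maxactive	= 20,	/* <= 0 lets register_kretprobe() pick a default */
};

static int __init rp_init(void)
{
	int ret = register_kretprobe(&my_rp);
	if (ret < 0)		/* -ENOSYS without ARCH_SUPPORTS_KRETPROBES */
		return ret;
	probed_func(21);	/* hits the entry probe, then the return handler */
	return 0;
}

static void __exit rp_exit(void)
{
	unregister_kretprobe(&my_rp);
	/* nmissed counts returns dropped because all instances were in use. */
	printk(KERN_INFO "missed %d probe instances\n", my_rp.nmissed);
}

module_init(rp_init);
module_exit(rp_exit);
MODULE_LICENSE("GPL");
```

On architectures that do not define `ARCH_SUPPORTS_KRETPROBES`, `register_kretprobe()` returns `-ENOSYS`, so the module above simply fails to load instead of arming a probe it cannot service.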