Diffstat (limited to 'kernel/kprobes.c')
-rw-r--r--  kernel/kprobes.c  298
1 file changed, 214 insertions(+), 84 deletions(-)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 7ba8cd9845cb..a5e74ddee0e2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -43,6 +43,7 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
+#include <linux/memory.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
@@ -67,7 +68,7 @@ static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
/* NOTE: change this value only with kprobe_mutex held */
-static bool kprobe_enabled;
+static bool kprobes_all_disarmed;
static DEFINE_MUTEX(kprobe_mutex); /* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
@@ -327,7 +328,7 @@ static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
struct kprobe *kp;
list_for_each_entry_rcu(kp, &p->list, list) {
- if (kp->pre_handler && !kprobe_gone(kp)) {
+ if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
set_kprobe_instance(kp);
if (kp->pre_handler(kp, regs))
return 1;
@@ -343,7 +344,7 @@ static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
struct kprobe *kp;
list_for_each_entry_rcu(kp, &p->list, list) {
- if (kp->post_handler && !kprobe_gone(kp)) {
+ if (kp->post_handler && likely(!kprobe_disabled(kp))) {
set_kprobe_instance(kp);
kp->post_handler(kp, regs, flags);
reset_kprobe_instance();
@@ -517,20 +518,28 @@ static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
}
/*
-* Add the new probe to old_p->list. Fail if this is the
+* Add the new probe to ap->list. Fail if this is the
* second jprobe at the address - two jprobes can't coexist
*/
-static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
+static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
+ BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
if (p->break_handler) {
- if (old_p->break_handler)
+ if (ap->break_handler)
return -EEXIST;
- list_add_tail_rcu(&p->list, &old_p->list);
- old_p->break_handler = aggr_break_handler;
+ list_add_tail_rcu(&p->list, &ap->list);
+ ap->break_handler = aggr_break_handler;
} else
- list_add_rcu(&p->list, &old_p->list);
- if (p->post_handler && !old_p->post_handler)
- old_p->post_handler = aggr_post_handler;
+ list_add_rcu(&p->list, &ap->list);
+ if (p->post_handler && !ap->post_handler)
+ ap->post_handler = aggr_post_handler;
+
+ if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
+ ap->flags &= ~KPROBE_FLAG_DISABLED;
+ if (!kprobes_all_disarmed)
+ /* Arm the breakpoint again. */
+ arch_arm_kprobe(ap);
+ }
return 0;
}
@@ -543,6 +552,7 @@ static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
copy_kprobe(p, ap);
flush_insn_slot(ap);
ap->addr = p->addr;
+ ap->flags = p->flags;
ap->pre_handler = aggr_pre_handler;
ap->fault_handler = aggr_fault_handler;
/* We don't care the kprobe which has gone. */
@@ -565,44 +575,59 @@ static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
struct kprobe *p)
{
int ret = 0;
- struct kprobe *ap;
+ struct kprobe *ap = old_p;
- if (kprobe_gone(old_p)) {
+ if (old_p->pre_handler != aggr_pre_handler) {
+ /* If old_p is not an aggr_kprobe, create a new aggr_kprobe. */
+ ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
+ if (!ap)
+ return -ENOMEM;
+ add_aggr_kprobe(ap, old_p);
+ }
+
+ if (kprobe_gone(ap)) {
/*
* Attempting to insert new probe at the same location that
* had a probe in the module vaddr area which already
* freed. So, the instruction slot has already been
* released. We need a new slot for the new probe.
*/
- ret = arch_prepare_kprobe(old_p);
+ ret = arch_prepare_kprobe(ap);
if (ret)
+ /*
+ * Even if we fail to allocate a new slot, we don't need to
+ * free the aggr_kprobe. It will be used next time, or
+ * freed by unregister_kprobe().
+ */
return ret;
- }
- if (old_p->pre_handler == aggr_pre_handler) {
- copy_kprobe(old_p, p);
- ret = add_new_kprobe(old_p, p);
- ap = old_p;
- } else {
- ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
- if (!ap) {
- if (kprobe_gone(old_p))
- arch_remove_kprobe(old_p);
- return -ENOMEM;
- }
- add_aggr_kprobe(ap, old_p);
- copy_kprobe(ap, p);
- ret = add_new_kprobe(ap, p);
- }
- if (kprobe_gone(old_p)) {
+
/*
- * If the old_p has gone, its breakpoint has been disarmed.
- * We have to arm it again after preparing real kprobes.
+ * Clear the gone flag to prevent allocating a new slot again, and
+ * set the disabled flag because the probe is not armed yet.
*/
- ap->flags &= ~KPROBE_FLAG_GONE;
- if (kprobe_enabled)
- arch_arm_kprobe(ap);
+ ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
+ | KPROBE_FLAG_DISABLED;
}
- return ret;
+
+ copy_kprobe(ap, p);
+ return add_new_kprobe(ap, p);
+}
+
+/* Try to disable aggr_kprobe, and return 1 if it succeeded. */
+static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
+{
+ struct kprobe *kp;
+
+ list_for_each_entry_rcu(kp, &p->list, list) {
+ if (!kprobe_disabled(kp))
+ /*
+ * There is an active probe on the list.
+ * We can't disable aggr_kprobe.
+ */
+ return 0;
+ }
+ p->flags |= KPROBE_FLAG_DISABLED;
+ return 1;
}
static int __kprobes in_kprobes_functions(unsigned long addr)
@@ -663,7 +688,9 @@ int __kprobes register_kprobe(struct kprobe *p)
return -EINVAL;
}
- p->flags = 0;
+ /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
+ p->flags &= KPROBE_FLAG_DISABLED;
+
/*
* Check if are we probing a module.
*/
@@ -699,17 +726,20 @@ int __kprobes register_kprobe(struct kprobe *p)
goto out;
}
+ mutex_lock(&text_mutex);
ret = arch_prepare_kprobe(p);
if (ret)
- goto out;
+ goto out_unlock_text;
INIT_HLIST_NODE(&p->hlist);
hlist_add_head_rcu(&p->hlist,
&kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
- if (kprobe_enabled)
+ if (!kprobes_all_disarmed && !kprobe_disabled(p))
arch_arm_kprobe(p);
+out_unlock_text:
+ mutex_unlock(&text_mutex);
out:
mutex_unlock(&kprobe_mutex);
@@ -718,26 +748,39 @@ out:
return ret;
}
+EXPORT_SYMBOL_GPL(register_kprobe);
-/*
- * Unregister a kprobe without a scheduler synchronization.
- */
-static int __kprobes __unregister_kprobe_top(struct kprobe *p)
+/* Check that the passed kprobe is valid and return the kprobe in kprobe_table. */
+static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
struct kprobe *old_p, *list_p;
old_p = get_kprobe(p->addr);
if (unlikely(!old_p))
- return -EINVAL;
+ return NULL;
if (p != old_p) {
list_for_each_entry_rcu(list_p, &old_p->list, list)
if (list_p == p)
/* kprobe p is a valid probe */
- goto valid_p;
- return -EINVAL;
+ goto valid;
+ return NULL;
}
-valid_p:
+valid:
+ return old_p;
+}
+
+/*
+ * Unregister a kprobe without a scheduler synchronization.
+ */
+static int __kprobes __unregister_kprobe_top(struct kprobe *p)
+{
+ struct kprobe *old_p, *list_p;
+
+ old_p = __get_valid_kprobe(p);
+ if (old_p == NULL)
+ return -EINVAL;
+
if (old_p == p ||
(old_p->pre_handler == aggr_pre_handler &&
list_is_singular(&old_p->list))) {
@@ -746,8 +789,11 @@ valid_p:
* enabled and not gone - otherwise, the breakpoint would
* already have been removed. We save on flushing icache.
*/
- if (kprobe_enabled && !kprobe_gone(old_p))
+ if (!kprobes_all_disarmed && !kprobe_disabled(old_p)) {
+ mutex_lock(&text_mutex);
arch_disarm_kprobe(p);
+ mutex_unlock(&text_mutex);
+ }
hlist_del_rcu(&old_p->hlist);
} else {
if (p->break_handler && !kprobe_gone(p))
@@ -761,6 +807,11 @@ valid_p:
}
noclean:
list_del_rcu(&p->list);
+ if (!kprobe_disabled(old_p)) {
+ try_to_disable_aggr_kprobe(old_p);
+ if (!kprobes_all_disarmed && kprobe_disabled(old_p))
+ arch_disarm_kprobe(old_p);
+ }
}
return 0;
}
@@ -796,11 +847,13 @@ int __kprobes register_kprobes(struct kprobe **kps, int num)
}
return ret;
}
+EXPORT_SYMBOL_GPL(register_kprobes);
void __kprobes unregister_kprobe(struct kprobe *p)
{
unregister_kprobes(&p, 1);
}
+EXPORT_SYMBOL_GPL(unregister_kprobe);
void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
@@ -819,6 +872,7 @@ void __kprobes unregister_kprobes(struct kprobe **kps, int num)
if (kps[i]->addr)
__unregister_kprobe_bottom(kps[i]);
}
+EXPORT_SYMBOL_GPL(unregister_kprobes);
static struct notifier_block kprobe_exceptions_nb = {
.notifier_call = kprobe_exceptions_notify,
@@ -858,16 +912,19 @@ int __kprobes register_jprobes(struct jprobe **jps, int num)
}
return ret;
}
+EXPORT_SYMBOL_GPL(register_jprobes);
int __kprobes register_jprobe(struct jprobe *jp)
{
return register_jprobes(&jp, 1);
}
+EXPORT_SYMBOL_GPL(register_jprobe);
void __kprobes unregister_jprobe(struct jprobe *jp)
{
unregister_jprobes(&jp, 1);
}
+EXPORT_SYMBOL_GPL(unregister_jprobe);
void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
@@ -887,6 +944,7 @@ void __kprobes unregister_jprobes(struct jprobe **jps, int num)
__unregister_kprobe_bottom(&jps[i]->kp);
}
}
+EXPORT_SYMBOL_GPL(unregister_jprobes);
#ifdef CONFIG_KRETPROBES
/*
@@ -912,10 +970,8 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
ri->rp = rp;
ri->task = current;
- if (rp->entry_handler && rp->entry_handler(ri, regs)) {
- spin_unlock_irqrestore(&rp->lock, flags);
+ if (rp->entry_handler && rp->entry_handler(ri, regs))
return 0;
- }
arch_prepare_kretprobe(ri, regs);
@@ -982,6 +1038,7 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
free_rp_inst(rp);
return ret;
}
+EXPORT_SYMBOL_GPL(register_kretprobe);
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
@@ -999,11 +1056,13 @@ int __kprobes register_kretprobes(struct kretprobe **rps, int num)
}
return ret;
}
+EXPORT_SYMBOL_GPL(register_kretprobes);
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
unregister_kretprobes(&rp, 1);
}
+EXPORT_SYMBOL_GPL(unregister_kretprobe);
void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
@@ -1025,24 +1084,30 @@ void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
}
}
}
+EXPORT_SYMBOL_GPL(unregister_kretprobes);
#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
return -ENOSYS;
}
+EXPORT_SYMBOL_GPL(register_kretprobe);
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
return -ENOSYS;
}
+EXPORT_SYMBOL_GPL(register_kretprobes);
+
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
+EXPORT_SYMBOL_GPL(unregister_kretprobe);
void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
+EXPORT_SYMBOL_GPL(unregister_kretprobes);
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
struct pt_regs *regs)
@@ -1056,6 +1121,7 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
static void __kprobes kill_kprobe(struct kprobe *p)
{
struct kprobe *kp;
+
p->flags |= KPROBE_FLAG_GONE;
if (p->pre_handler == aggr_pre_handler) {
/*
@@ -1168,8 +1234,8 @@ static int __init init_kprobes(void)
}
}
- /* By default, kprobes are enabled */
- kprobe_enabled = true;
+ /* By default, kprobes are armed */
+ kprobes_all_disarmed = false;
err = arch_init_kprobes();
if (!err)
@@ -1197,12 +1263,18 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
else
kprobe_type = "k";
if (sym)
- seq_printf(pi, "%p %s %s+0x%x %s %s\n", p->addr, kprobe_type,
- sym, offset, (modname ? modname : " "),
- (kprobe_gone(p) ? "[GONE]" : ""));
+ seq_printf(pi, "%p %s %s+0x%x %s %s%s\n",
+ p->addr, kprobe_type, sym, offset,
+ (modname ? modname : " "),
+ (kprobe_gone(p) ? "[GONE]" : ""),
+ ((kprobe_disabled(p) && !kprobe_gone(p)) ?
+ "[DISABLED]" : ""));
else
- seq_printf(pi, "%p %s %p %s\n", p->addr, kprobe_type, p->addr,
- (kprobe_gone(p) ? "[GONE]" : ""));
+ seq_printf(pi, "%p %s %p %s%s\n",
+ p->addr, kprobe_type, p->addr,
+ (kprobe_gone(p) ? "[GONE]" : ""),
+ ((kprobe_disabled(p) && !kprobe_gone(p)) ?
+ "[DISABLED]" : ""));
}
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -1267,7 +1339,72 @@ static struct file_operations debugfs_kprobes_operations = {
.release = seq_release,
};
-static void __kprobes enable_all_kprobes(void)
+/* Disable one kprobe */
+int __kprobes disable_kprobe(struct kprobe *kp)
+{
+ int ret = 0;
+ struct kprobe *p;
+
+ mutex_lock(&kprobe_mutex);
+
+ /* Check whether specified probe is valid. */
+ p = __get_valid_kprobe(kp);
+ if (unlikely(p == NULL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ /* If the probe is already disabled (or gone), just return */
+ if (kprobe_disabled(kp))
+ goto out;
+
+ kp->flags |= KPROBE_FLAG_DISABLED;
+ if (p != kp)
+ /* When kp != p, p is always enabled. */
+ try_to_disable_aggr_kprobe(p);
+
+ if (!kprobes_all_disarmed && kprobe_disabled(p))
+ arch_disarm_kprobe(p);
+out:
+ mutex_unlock(&kprobe_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(disable_kprobe);
+
+/* Enable one kprobe */
+int __kprobes enable_kprobe(struct kprobe *kp)
+{
+ int ret = 0;
+ struct kprobe *p;
+
+ mutex_lock(&kprobe_mutex);
+
+ /* Check whether specified probe is valid. */
+ p = __get_valid_kprobe(kp);
+ if (unlikely(p == NULL)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (kprobe_gone(kp)) {
+ /* This kprobe has gone, we cannot enable it. */
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (!kprobes_all_disarmed && kprobe_disabled(p))
+ arch_arm_kprobe(p);
+
+ p->flags &= ~KPROBE_FLAG_DISABLED;
+ if (p != kp)
+ kp->flags &= ~KPROBE_FLAG_DISABLED;
+out:
+ mutex_unlock(&kprobe_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(enable_kprobe);
+
+static void __kprobes arm_all_kprobes(void)
{
struct hlist_head *head;
struct hlist_node *node;
@@ -1276,18 +1413,20 @@ static void __kprobes enable_all_kprobes(void)
mutex_lock(&kprobe_mutex);
- /* If kprobes are already enabled, just return */
- if (kprobe_enabled)
+ /* If kprobes are armed, just return */
+ if (!kprobes_all_disarmed)
goto already_enabled;
+ mutex_lock(&text_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist)
- if (!kprobe_gone(p))
+ if (!kprobe_disabled(p))
arch_arm_kprobe(p);
}
+ mutex_unlock(&text_mutex);
- kprobe_enabled = true;
+ kprobes_all_disarmed = false;
printk(KERN_INFO "Kprobes globally enabled\n");
already_enabled:
@@ -1295,7 +1434,7 @@ already_enabled:
return;
}
-static void __kprobes disable_all_kprobes(void)
+static void __kprobes disarm_all_kprobes(void)
{
struct hlist_head *head;
struct hlist_node *node;
@@ -1304,20 +1443,22 @@ static void __kprobes disable_all_kprobes(void)
mutex_lock(&kprobe_mutex);
- /* If kprobes are already disabled, just return */
- if (!kprobe_enabled)
+ /* If kprobes are already disarmed, just return */
+ if (kprobes_all_disarmed)
goto already_disabled;
- kprobe_enabled = false;
+ kprobes_all_disarmed = true;
printk(KERN_INFO "Kprobes globally disabled\n");
+ mutex_lock(&text_mutex);
for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
head = &kprobe_table[i];
hlist_for_each_entry_rcu(p, node, head, hlist) {
- if (!arch_trampoline_kprobe(p) && !kprobe_gone(p))
+ if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
arch_disarm_kprobe(p);
}
}
+ mutex_unlock(&text_mutex);
mutex_unlock(&kprobe_mutex);
/* Allow all currently running kprobes to complete */
synchronize_sched();
@@ -1338,7 +1479,7 @@ static ssize_t read_enabled_file_bool(struct file *file,
{
char buf[3];
- if (kprobe_enabled)
+ if (!kprobes_all_disarmed)
buf[0] = '1';
else
buf[0] = '0';
@@ -1361,12 +1502,12 @@ static ssize_t write_enabled_file_bool(struct file *file,
case 'y':
case 'Y':
case '1':
- enable_all_kprobes();
+ arm_all_kprobes();
break;
case 'n':
case 'N':
case '0':
- disable_all_kprobes();
+ disarm_all_kprobes();
break;
}
@@ -1409,16 +1550,5 @@ late_initcall(debugfs_kprobe_init);
module_init(init_kprobes);
-EXPORT_SYMBOL_GPL(register_kprobe);
-EXPORT_SYMBOL_GPL(unregister_kprobe);
-EXPORT_SYMBOL_GPL(register_kprobes);
-EXPORT_SYMBOL_GPL(unregister_kprobes);
-EXPORT_SYMBOL_GPL(register_jprobe);
-EXPORT_SYMBOL_GPL(unregister_jprobe);
-EXPORT_SYMBOL_GPL(register_jprobes);
-EXPORT_SYMBOL_GPL(unregister_jprobes);
+/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);
-EXPORT_SYMBOL_GPL(register_kretprobe);
-EXPORT_SYMBOL_GPL(unregister_kretprobe);
-EXPORT_SYMBOL_GPL(register_kretprobes);
-EXPORT_SYMBOL_GPL(unregister_kretprobes);
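
Usage note (not part of the diff above): the per-probe disabling interface this patch adds can be driven from a module roughly as sketched below. register_kprobe() now accepts KPROBE_FLAG_DISABLED from callers, and the new enable_kprobe()/disable_kprobe() arm and disarm an individual probe under kprobe_mutex. The probed symbol (do_fork), the handler, and the module names are illustrative assumptions, not taken from the patch.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* Hypothetical handler: log the probe address on each hit. */
static int __kprobes handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;
}

static struct kprobe kp = {
	.symbol_name = "do_fork",	/* illustrative probe target */
	.pre_handler = handler_pre,
	/* Start disabled: registered, but the breakpoint is not armed. */
	.flags = KPROBE_FLAG_DISABLED,
};

static int __init example_init(void)
{
	int ret = register_kprobe(&kp);
	if (ret < 0)
		return ret;
	/* Arm the probe once we are ready to take callbacks. */
	ret = enable_kprobe(&kp);
	if (ret < 0)
		unregister_kprobe(&kp);
	return ret;
}

static void __exit example_exit(void)
{
	/* Disarm, then unregister. */
	disable_kprobe(&kp);
	unregister_kprobe(&kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Registering in the disabled state and arming later avoids taking probe hits before the consumer is ready, which appears to be one intended use of the new flag; the [DISABLED] marker added to the debugfs listing makes that state visible.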