about | summary | refs | log | tree | commit | diff | stats
path: root/kernel/trace/ftrace.c
diff options
context:
space:
mode:
author: Steven Rostedt <srostedt@redhat.com> 2008-05-12 21:20:48 +0200
committer: Thomas Gleixner <tglx@linutronix.de> 2008-05-23 20:54:16 +0200
commit4eebcc81a33fbc45e28542b50197ed7b3c486d90 (patch)
tree13bbad50aa8d4dc36d630ef08886876f4dc0b6eb /kernel/trace/ftrace.c
parentftrace - fix dynamic ftrace memory leak (diff)
downloadlinux-dev-4eebcc81a33fbc45e28542b50197ed7b3c486d90.tar.xz
linux-dev-4eebcc81a33fbc45e28542b50197ed7b3c486d90.zip
ftrace: disable tracing on failure
Since ftrace touches practically every function, if we detect any anomaly, we want to fully disable ftrace. This patch adds code to try to shut down ftrace as much as possible without doing any more harm if something is detected to be not quite correct. This only kills ftrace; this patch does not have checks for other parts of the tracer (irqsoff, wakeup, etc.). Signed-off-by: Steven Rostedt <srostedt@redhat.com> Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r-- kernel/trace/ftrace.c | 112
1 file changed, 103 insertions(+), 9 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 8e02aa690b2b..ff42345dd78e 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -29,9 +29,16 @@
#include "trace.h"
-int ftrace_enabled;
+/* ftrace_enabled is a method to turn ftrace on or off */
+int ftrace_enabled __read_mostly;
static int last_ftrace_enabled;
+/*
+ * ftrace_disabled is set when an anomaly is discovered.
+ * ftrace_disabled is much stronger than ftrace_enabled.
+ */
+static int ftrace_disabled __read_mostly;
+
static DEFINE_SPINLOCK(ftrace_lock);
static DEFINE_MUTEX(ftrace_sysctl_lock);
@@ -230,10 +237,11 @@ static notrace struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
if (ftrace_free_records) {
rec = ftrace_free_records;
- /* todo, disable tracing altogether on this warning */
if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
WARN_ON_ONCE(1);
ftrace_free_records = NULL;
+ ftrace_disabled = 1;
+ ftrace_enabled = 0;
return NULL;
}
@@ -260,7 +268,7 @@ ftrace_record_ip(unsigned long ip)
int resched;
int atomic;
- if (!ftrace_enabled)
+ if (!ftrace_enabled || ftrace_disabled)
return;
resched = need_resched();
@@ -485,6 +493,9 @@ static void notrace ftrace_startup(void)
{
int command = 0;
+ if (unlikely(ftrace_disabled))
+ return;
+
mutex_lock(&ftraced_lock);
ftraced_suspend++;
if (ftraced_suspend == 1)
@@ -507,6 +518,9 @@ static void notrace ftrace_shutdown(void)
{
int command = 0;
+ if (unlikely(ftrace_disabled))
+ return;
+
mutex_lock(&ftraced_lock);
ftraced_suspend--;
if (!ftraced_suspend)
@@ -529,6 +543,9 @@ static void notrace ftrace_startup_sysctl(void)
{
int command = FTRACE_ENABLE_MCOUNT;
+ if (unlikely(ftrace_disabled))
+ return;
+
mutex_lock(&ftraced_lock);
/* Force update next time */
saved_ftrace_func = NULL;
@@ -544,6 +561,9 @@ static void notrace ftrace_shutdown_sysctl(void)
{
int command = FTRACE_DISABLE_MCOUNT;
+ if (unlikely(ftrace_disabled))
+ return;
+
mutex_lock(&ftraced_lock);
/* ftraced_suspend is true if ftrace is running */
if (ftraced_suspend)
@@ -600,6 +620,9 @@ static int notrace __ftrace_update_code(void *ignore)
static void notrace ftrace_update_code(void)
{
+ if (unlikely(ftrace_disabled))
+ return;
+
stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
}
@@ -614,6 +637,9 @@ static int notrace ftraced(void *ignore)
/* check once a second */
schedule_timeout(HZ);
+ if (unlikely(ftrace_disabled))
+ continue;
+
mutex_lock(&ftrace_sysctl_lock);
mutex_lock(&ftraced_lock);
if (ftrace_enabled && ftraced_trigger && !ftraced_suspend) {
@@ -628,6 +654,7 @@ static int notrace ftraced(void *ignore)
ftrace_update_cnt != 1 ? "s" : "",
ftrace_update_tot_cnt,
usecs, usecs != 1 ? "s" : "");
+ ftrace_disabled = 1;
WARN_ON_ONCE(1);
}
ftraced_trigger = 0;
@@ -785,6 +812,9 @@ ftrace_avail_open(struct inode *inode, struct file *file)
struct ftrace_iterator *iter;
int ret;
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return -ENOMEM;
@@ -843,6 +873,9 @@ ftrace_filter_open(struct inode *inode, struct file *file)
struct ftrace_iterator *iter;
int ret = 0;
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
iter = kzalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
return -ENOMEM;
@@ -1063,6 +1096,9 @@ ftrace_filter_write(struct file *file, const char __user *ubuf,
*/
notrace void ftrace_set_filter(unsigned char *buf, int len, int reset)
{
+ if (unlikely(ftrace_disabled))
+ return;
+
mutex_lock(&ftrace_filter_lock);
if (reset)
ftrace_filter_reset();
@@ -1133,7 +1169,7 @@ int ftrace_force_update(void)
DECLARE_WAITQUEUE(wait, current);
int ret = 0;
- if (!ftraced_task)
+ if (unlikely(ftrace_disabled))
return -ENODEV;
mutex_lock(&ftraced_lock);
@@ -1142,6 +1178,11 @@ int ftrace_force_update(void)
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&ftraced_waiters, &wait);
+ if (unlikely(!ftraced_task)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
do {
mutex_unlock(&ftraced_lock);
wake_up_process(ftraced_task);
@@ -1154,6 +1195,7 @@ int ftrace_force_update(void)
set_current_state(TASK_INTERRUPTIBLE);
} while (last_counter == ftraced_iteration_counter);
+ out:
mutex_unlock(&ftraced_lock);
remove_wait_queue(&ftraced_waiters, &wait);
set_current_state(TASK_RUNNING);
@@ -1161,6 +1203,22 @@ int ftrace_force_update(void)
return ret;
}
+static void ftrace_force_shutdown(void)
+{
+ struct task_struct *task;
+ int command = FTRACE_DISABLE_CALLS | FTRACE_UPDATE_TRACE_FUNC;
+
+ mutex_lock(&ftraced_lock);
+ task = ftraced_task;
+ ftraced_task = NULL;
+ ftraced_suspend = -1;
+ ftrace_run_update_code(command);
+ mutex_unlock(&ftraced_lock);
+
+ if (task)
+ kthread_stop(task);
+}
+
static __init int ftrace_init_debugfs(void)
{
struct dentry *d_tracer;
@@ -1194,21 +1252,29 @@ static int __init notrace ftrace_dynamic_init(void)
stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);
/* ftrace_dyn_arch_init places the return code in addr */
- if (addr)
- return addr;
+ if (addr) {
+ ret = (int)addr;
+ goto failed;
+ }
ret = ftrace_dyn_table_alloc();
if (ret)
- return ret;
+ goto failed;
p = kthread_run(ftraced, NULL, "ftraced");
- if (IS_ERR(p))
- return -1;
+ if (IS_ERR(p)) {
+ ret = -1;
+ goto failed;
+ }
last_ftrace_enabled = ftrace_enabled = 1;
ftraced_task = p;
return 0;
+
+ failed:
+ ftrace_disabled = 1;
+ return ret;
}
core_initcall(ftrace_dynamic_init);
@@ -1217,9 +1283,31 @@ core_initcall(ftrace_dynamic_init);
# define ftrace_shutdown() do { } while (0)
# define ftrace_startup_sysctl() do { } while (0)
# define ftrace_shutdown_sysctl() do { } while (0)
+# define ftrace_force_shutdown() do { } while (0)
#endif /* CONFIG_DYNAMIC_FTRACE */
/**
+ * ftrace_kill - totally shutdown ftrace
+ *
+ * This is a safety measure. If something was detected that seems
+ * wrong, calling this function will keep ftrace from doing
+ * any more modifications, and updates.
+ * used when something went wrong.
+ */
+void ftrace_kill(void)
+{
+ mutex_lock(&ftrace_sysctl_lock);
+ ftrace_disabled = 1;
+ ftrace_enabled = 0;
+
+ clear_ftrace_function();
+ mutex_unlock(&ftrace_sysctl_lock);
+
+ /* Try to totally disable ftrace */
+ ftrace_force_shutdown();
+}
+
+/**
* register_ftrace_function - register a function for profiling
* @ops - ops structure that holds the function for profiling.
*
@@ -1234,6 +1322,9 @@ int register_ftrace_function(struct ftrace_ops *ops)
{
int ret;
+ if (unlikely(ftrace_disabled))
+ return -1;
+
mutex_lock(&ftrace_sysctl_lock);
ret = __register_ftrace_function(ops);
ftrace_startup();
@@ -1267,6 +1358,9 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
{
int ret;
+ if (unlikely(ftrace_disabled))
+ return -ENODEV;
+
mutex_lock(&ftrace_sysctl_lock);
ret = proc_dointvec(table, write, file, buffer, lenp, ppos);