tracing: Change tracing_entries_fops to rely on tracing_get_cpu()
kernel/trace/trace.c
index e71a8be4a6ee9decd1429eb13159885f8c567469..68b46851666f0dae17964a958204cefe165068ed 100644 (file)
@@ -115,6 +115,9 @@ cpumask_var_t __read_mostly tracing_buffer_mask;
 
 enum ftrace_dump_mode ftrace_dump_on_oops;
 
+/* When set, tracing will stop when a WARN*() is hit */
+int __disable_trace_on_warning;
+
 static int tracing_set_tracer(const char *buf);
 
 #define MAX_TRACER_SIZE                100
@@ -149,6 +152,13 @@ static int __init set_ftrace_dump_on_oops(char *str)
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
+static int __init stop_trace_on_warning(char *str)
+{
+       __disable_trace_on_warning = 1;
+       return 1;
+}
+__setup("traceoff_on_warning=", stop_trace_on_warning);
+
 static int __init boot_alloc_snapshot(char *str)
 {
        allocate_snapshot = true;
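
The stop_trace_on_warning() __setup handler above only records that "traceoff_on_warning" was given on the kernel command line; the actual stop happens in disable_trace_on_warning(), added further down in this diff, which the WARN*() slow path is expected to invoke via a companion change outside trace.c. A minimal sketch of such a call site, for illustration only (the function name here is hypothetical):

	/*
	 * Illustrative sketch, not part of this patch: roughly how a
	 * WARN*() slow path outside this file could use the new hook.
	 */
	static void example_warn_slowpath(void)
	{
		/* ... print the warning, dump the stack ... */
		disable_trace_on_warning();	/* no-op unless traceoff_on_warning was given at boot */
	}
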
@@ -170,6 +180,7 @@ static int __init set_trace_boot_options(char *str)
 }
 __setup("trace_options=", set_trace_boot_options);
 
+
 unsigned long long ns2usecs(cycle_t nsec)
 {
        nsec += 500;
@@ -193,6 +204,37 @@ static struct trace_array  global_trace;
 
 LIST_HEAD(ftrace_trace_arrays);
 
+int trace_array_get(struct trace_array *this_tr)
+{
+       struct trace_array *tr;
+       int ret = -ENODEV;
+
+       mutex_lock(&trace_types_lock);
+       list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+               if (tr == this_tr) {
+                       tr->ref++;
+                       ret = 0;
+                       break;
+               }
+       }
+       mutex_unlock(&trace_types_lock);
+
+       return ret;
+}
+
+static void __trace_array_put(struct trace_array *this_tr)
+{
+       WARN_ON(!this_tr->ref);
+       this_tr->ref--;
+}
+
+void trace_array_put(struct trace_array *this_tr)
+{
+       mutex_lock(&trace_types_lock);
+       __trace_array_put(this_tr);
+       mutex_unlock(&trace_types_lock);
+}
+
 int filter_current_check_discard(struct ring_buffer *buffer,
                                 struct ftrace_event_call *call, void *rec,
                                 struct ring_buffer_event *event)
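
trace_array_get()/trace_array_put() introduce the reference-counting pattern the rest of this diff applies to the per-instance debugfs files: every ->open that takes a reference must drop it in ->release (or on its own error path), so a trace instance cannot go away while one of its files is open. A minimal sketch of the pairing with hypothetical file operations, mirroring tracing_open_generic_tr()/tracing_release_generic_tr() below:

	/* Illustrative pairing only: take a reference when the file is opened ... */
	static int example_open(struct inode *inode, struct file *filp)
	{
		struct trace_array *tr = inode->i_private;

		if (trace_array_get(tr) < 0)
			return -ENODEV;	/* instance no longer on ftrace_trace_arrays */
		filp->private_data = tr;
		return 0;
	}

	/* ... and drop it on release. */
	static int example_release(struct inode *inode, struct file *filp)
	{
		trace_array_put(inode->i_private);
		return 0;
	}
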
@@ -215,9 +257,24 @@ cycle_t ftrace_now(int cpu)
        return ts;
 }
 
+/**
+ * tracing_is_enabled - Show if global_trace has been disabled
+ *
+ * Shows if the global trace has been enabled or not. It uses the
+ * mirror flag "buffer_disabled" to be used in fast paths such as for
+ * the irqsoff tracer. But it may be inaccurate due to races. If you
+ * need to know the accurate state, use tracing_is_on() which is a little
+ * slower, but accurate.
+ */
 int tracing_is_enabled(void)
 {
-       return tracing_is_on();
+       /*
+        * For quick access (irqsoff uses this in fast path), just
+        * return the mirror variable of the state of the ring buffer.
+        * It's a little racy, but we don't really care.
+        */
+       smp_rmb();
+       return !global_trace.buffer_disabled;
 }
 
 /*
@@ -240,7 +297,7 @@ static struct tracer                *trace_types __read_mostly;
 /*
  * trace_types_lock is used to protect the trace_types list.
  */
-static DEFINE_MUTEX(trace_types_lock);
+DEFINE_MUTEX(trace_types_lock);
 
 /*
  * serialize the access of the ring buffer
@@ -330,6 +387,23 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
        TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
        TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
 
+static void tracer_tracing_on(struct trace_array *tr)
+{
+       if (tr->trace_buffer.buffer)
+               ring_buffer_record_on(tr->trace_buffer.buffer);
+       /*
+        * This flag is looked at when buffers haven't been allocated
+        * yet, or by some tracers (like irqsoff), that just want to
+        * know if the ring buffer has been disabled, but it can handle
+        * races of where it gets disabled but we still do a record.
+        * As the check is in the fast path of the tracers, it is more
+        * important to be fast than accurate.
+        */
+       tr->buffer_disabled = 0;
+       /* Make the flag seen by readers */
+       smp_wmb();
+}
+
 /**
  * tracing_on - enable tracing buffers
  *
@@ -338,15 +412,7 @@ unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
  */
 void tracing_on(void)
 {
-       if (global_trace.trace_buffer.buffer)
-               ring_buffer_record_on(global_trace.trace_buffer.buffer);
-       /*
-        * This flag is only looked at when buffers haven't been
-        * allocated yet. We don't really care about the race
-        * between setting this flag and actually turning
-        * on the buffer.
-        */
-       global_trace.buffer_disabled = 0;
+       tracer_tracing_on(&global_trace);
 }
 EXPORT_SYMBOL_GPL(tracing_on);
 
@@ -540,6 +606,23 @@ void tracing_snapshot_alloc(void)
 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
 #endif /* CONFIG_TRACER_SNAPSHOT */
 
+static void tracer_tracing_off(struct trace_array *tr)
+{
+       if (tr->trace_buffer.buffer)
+               ring_buffer_record_off(tr->trace_buffer.buffer);
+       /*
+        * This flag is looked at when buffers haven't been allocated
+        * yet, or by some tracers (like irqsoff), that just want to
+        * know if the ring buffer has been disabled, but it can handle
+        * races of where it gets disabled but we still do a record.
+        * As the check is in the fast path of the tracers, it is more
+        * important to be fast than accurate.
+        */
+       tr->buffer_disabled = 1;
+       /* Make the flag seen by readers */
+       smp_wmb();
+}
+
 /**
  * tracing_off - turn off tracing buffers
  *
@@ -550,26 +633,35 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
  */
 void tracing_off(void)
 {
-       if (global_trace.trace_buffer.buffer)
-               ring_buffer_record_off(global_trace.trace_buffer.buffer);
-       /*
-        * This flag is only looked at when buffers haven't been
-        * allocated yet. We don't really care about the race
-        * between setting this flag and actually turning
-        * on the buffer.
-        */
-       global_trace.buffer_disabled = 1;
+       tracer_tracing_off(&global_trace);
 }
 EXPORT_SYMBOL_GPL(tracing_off);
 
+void disable_trace_on_warning(void)
+{
+       if (__disable_trace_on_warning)
+               tracing_off();
+}
+
+/**
+ * tracer_tracing_is_on - show real state of ring buffer enabled
+ * @tr : the trace array to know if ring buffer is enabled
+ *
+ * Shows real state of the ring buffer if it is enabled or not.
+ */
+static int tracer_tracing_is_on(struct trace_array *tr)
+{
+       if (tr->trace_buffer.buffer)
+               return ring_buffer_record_is_on(tr->trace_buffer.buffer);
+       return !tr->buffer_disabled;
+}
+
 /**
  * tracing_is_on - show state of ring buffers enabled
  */
 int tracing_is_on(void)
 {
-       if (global_trace.trace_buffer.buffer)
-               return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
-       return !global_trace.buffer_disabled;
+       return tracer_tracing_is_on(&global_trace);
 }
 EXPORT_SYMBOL_GPL(tracing_is_on);
 
@@ -1543,15 +1635,6 @@ trace_function(struct trace_array *tr,
                __buffer_unlock_commit(buffer, event);
 }
 
-void
-ftrace(struct trace_array *tr, struct trace_array_cpu *data,
-       unsigned long ip, unsigned long parent_ip, unsigned long flags,
-       int pc)
-{
-       if (likely(!atomic_read(&data->disabled)))
-               trace_function(tr, ip, parent_ip, flags, pc);
-}
-
 #ifdef CONFIG_STACKTRACE
 
 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
@@ -2760,6 +2843,17 @@ static int s_show(struct seq_file *m, void *v)
        return 0;
 }
 
+/*
+ * Should be used after trace_array_get(), trace_types_lock
+ * ensures that i_cdev was already initialized.
+ */
+static inline int tracing_get_cpu(struct inode *inode)
+{
+       if (inode->i_cdev) /* See trace_create_cpu_file() */
+               return (long)inode->i_cdev - 1;
+       return RING_BUFFER_ALL_CPUS;
+}
+
 static const struct seq_operations tracer_seq_ops = {
        .start          = s_start,
        .next           = s_next,
@@ -2768,10 +2862,9 @@ static const struct seq_operations tracer_seq_ops = {
 };
 
 static struct trace_iterator *
-__tracing_open(struct inode *inode, struct file *file, bool snapshot)
+__tracing_open(struct trace_array *tr, struct trace_cpu *tc,
+              struct inode *inode, struct file *file, bool snapshot)
 {
-       struct trace_cpu *tc = inode->i_private;
-       struct trace_array *tr = tc->tr;
        struct trace_iterator *iter;
        int cpu;
 
@@ -2850,8 +2943,6 @@ __tracing_open(struct inode *inode, struct file *file, bool snapshot)
                tracing_iter_reset(iter, cpu);
        }
 
-       tr->ref++;
-
        mutex_unlock(&trace_types_lock);
 
        return iter;
@@ -2874,6 +2965,25 @@ int tracing_open_generic(struct inode *inode, struct file *filp)
        return 0;
 }
 
+/*
+ * Open and update trace_array ref count.
+ * Must have the current trace_array passed to it.
+ */
+static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
+{
+       struct trace_array *tr = inode->i_private;
+
+       if (tracing_disabled)
+               return -ENODEV;
+
+       if (trace_array_get(tr) < 0)
+               return -ENODEV;
+
+       filp->private_data = inode->i_private;
+
+       return 0;
+}
+
 static int tracing_release(struct inode *inode, struct file *file)
 {
        struct seq_file *m = file->private_data;
@@ -2881,17 +2991,19 @@ static int tracing_release(struct inode *inode, struct file *file)
        struct trace_array *tr;
        int cpu;
 
-       if (!(file->f_mode & FMODE_READ))
+       /* Writes do not use seq_file, need to grab tr from inode */
+       if (!(file->f_mode & FMODE_READ)) {
+               struct trace_cpu *tc = inode->i_private;
+
+               trace_array_put(tc->tr);
                return 0;
+       }
 
        iter = m->private;
        tr = iter->tr;
 
        mutex_lock(&trace_types_lock);
 
-       WARN_ON(!tr->ref);
-       tr->ref--;
-
        for_each_tracing_cpu(cpu) {
                if (iter->buffer_iter[cpu])
                        ring_buffer_read_finish(iter->buffer_iter[cpu]);
@@ -2903,6 +3015,9 @@ static int tracing_release(struct inode *inode, struct file *file)
        if (!iter->snapshot)
                /* reenable tracing if it was previously enabled */
                tracing_start_tr(tr);
+
+       __trace_array_put(tr);
+
        mutex_unlock(&trace_types_lock);
 
        mutex_destroy(&iter->mutex);
@@ -2910,20 +3025,40 @@ static int tracing_release(struct inode *inode, struct file *file)
        kfree(iter->trace);
        kfree(iter->buffer_iter);
        seq_release_private(inode, file);
+
+       return 0;
+}
+
+static int tracing_release_generic_tr(struct inode *inode, struct file *file)
+{
+       struct trace_array *tr = inode->i_private;
+
+       trace_array_put(tr);
        return 0;
 }
 
+static int tracing_single_release_tr(struct inode *inode, struct file *file)
+{
+       struct trace_array *tr = inode->i_private;
+
+       trace_array_put(tr);
+
+       return single_release(inode, file);
+}
+
 static int tracing_open(struct inode *inode, struct file *file)
 {
+       struct trace_cpu *tc = inode->i_private;
+       struct trace_array *tr = tc->tr;
        struct trace_iterator *iter;
        int ret = 0;
 
+       if (trace_array_get(tr) < 0)
+               return -ENODEV;
+
        /* If this file was open for write, then erase contents */
        if ((file->f_mode & FMODE_WRITE) &&
            (file->f_flags & O_TRUNC)) {
-               struct trace_cpu *tc = inode->i_private;
-               struct trace_array *tr = tc->tr;
-
                if (tc->cpu == RING_BUFFER_ALL_CPUS)
                        tracing_reset_online_cpus(&tr->trace_buffer);
                else
@@ -2931,12 +3066,16 @@ static int tracing_open(struct inode *inode, struct file *file)
        }
 
        if (file->f_mode & FMODE_READ) {
-               iter = __tracing_open(inode, file, false);
+               iter = __tracing_open(tr, tc, inode, file, false);
                if (IS_ERR(iter))
                        ret = PTR_ERR(iter);
                else if (trace_flags & TRACE_ITER_LATENCY_FMT)
                        iter->iter_flags |= TRACE_FILE_LAT_FMT;
        }
+
+       if (ret < 0)
+               trace_array_put(tr);
+
        return ret;
 }
 
@@ -3293,17 +3432,27 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf,
 
 static int tracing_trace_options_open(struct inode *inode, struct file *file)
 {
+       struct trace_array *tr = inode->i_private;
+       int ret;
+
        if (tracing_disabled)
                return -ENODEV;
 
-       return single_open(file, tracing_trace_options_show, inode->i_private);
+       if (trace_array_get(tr) < 0)
+               return -ENODEV;
+
+       ret = single_open(file, tracing_trace_options_show, inode->i_private);
+       if (ret < 0)
+               trace_array_put(tr);
+
+       return ret;
 }
 
 static const struct file_operations tracing_iter_fops = {
        .open           = tracing_trace_options_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = single_release,
+       .release        = tracing_single_release_tr,
        .write          = tracing_trace_options_write,
 };
 
@@ -3379,14 +3528,14 @@ static const char readme_msg[] =
        "\n  snapshot\t\t- Like 'trace' but shows the content of the static snapshot buffer\n"
        "\t\t\t  Read the contents for more information\n"
 #endif
-#ifdef CONFIG_STACKTRACE
+#ifdef CONFIG_STACK_TRACER
        "  stack_trace\t\t- Shows the max stack trace when active\n"
        "  stack_max_size\t- Shows current max stack size that was traced\n"
        "\t\t\t  Write into this file to reset the max size (trigger a new trace)\n"
 #ifdef CONFIG_DYNAMIC_FTRACE
        "  stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace traces\n"
 #endif
-#endif /* CONFIG_STACKTRACE */
+#endif /* CONFIG_STACK_TRACER */
 ;
 
 static ssize_t
@@ -3783,20 +3932,23 @@ tracing_max_lat_write(struct file *filp, const char __user *ubuf,
 
 static int tracing_open_pipe(struct inode *inode, struct file *filp)
 {
-       struct trace_cpu *tc = inode->i_private;
-       struct trace_array *tr = tc->tr;
+       struct trace_array *tr = inode->i_private;
        struct trace_iterator *iter;
        int ret = 0;
 
        if (tracing_disabled)
                return -ENODEV;
 
+       if (trace_array_get(tr) < 0)
+               return -ENODEV;
+
        mutex_lock(&trace_types_lock);
 
        /* create a buffer to store the information to pass to userspace */
        iter = kzalloc(sizeof(*iter), GFP_KERNEL);
        if (!iter) {
                ret = -ENOMEM;
+               __trace_array_put(tr);
                goto out;
        }
 
@@ -3826,9 +3978,9 @@ static int tracing_open_pipe(struct inode *inode, struct file *filp)
        if (trace_clocks[tr->clock_id].in_ns)
                iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
 
-       iter->cpu_file = tc->cpu;
-       iter->tr = tc->tr;
-       iter->trace_buffer = &tc->tr->trace_buffer;
+       iter->tr = tr;
+       iter->trace_buffer = &tr->trace_buffer;
+       iter->cpu_file = tracing_get_cpu(inode);
        mutex_init(&iter->mutex);
        filp->private_data = iter;
 
@@ -3843,6 +3995,7 @@ out:
 fail:
        kfree(iter->trace);
        kfree(iter);
+       __trace_array_put(tr);
        mutex_unlock(&trace_types_lock);
        return ret;
 }
@@ -3850,6 +4003,7 @@ fail:
 static int tracing_release_pipe(struct inode *inode, struct file *file)
 {
        struct trace_iterator *iter = file->private_data;
+       struct trace_array *tr = inode->i_private;
 
        mutex_lock(&trace_types_lock);
 
@@ -3863,6 +4017,8 @@ static int tracing_release_pipe(struct inode *inode, struct file *file)
        kfree(iter->trace);
        kfree(iter);
 
+       trace_array_put(tr);
+
        return 0;
 }
 
@@ -3939,7 +4095,7 @@ static int tracing_wait_pipe(struct file *filp)
                 *
                 * iter->pos will be 0 if we haven't read anything.
                 */
-               if (!tracing_is_enabled() && iter->pos)
+               if (!tracing_is_on() && iter->pos)
                        break;
        }
 
@@ -4200,15 +4356,16 @@ static ssize_t
 tracing_entries_read(struct file *filp, char __user *ubuf,
                     size_t cnt, loff_t *ppos)
 {
-       struct trace_cpu *tc = filp->private_data;
-       struct trace_array *tr = tc->tr;
+       struct inode *inode = file_inode(filp);
+       struct trace_array *tr = inode->i_private;
+       int cpu = tracing_get_cpu(inode);
        char buf[64];
        int r = 0;
        ssize_t ret;
 
        mutex_lock(&trace_types_lock);
 
-       if (tc->cpu == RING_BUFFER_ALL_CPUS) {
+       if (cpu == RING_BUFFER_ALL_CPUS) {
                int cpu, buf_size_same;
                unsigned long size;
 
@@ -4235,7 +4392,7 @@ tracing_entries_read(struct file *filp, char __user *ubuf,
                } else
                        r = sprintf(buf, "X\n");
        } else
-               r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, tc->cpu)->entries >> 10);
+               r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
 
        mutex_unlock(&trace_types_lock);
 
@@ -4247,7 +4404,8 @@ static ssize_t
 tracing_entries_write(struct file *filp, const char __user *ubuf,
                      size_t cnt, loff_t *ppos)
 {
-       struct trace_cpu *tc = filp->private_data;
+       struct inode *inode = file_inode(filp);
+       struct trace_array *tr = inode->i_private;
        unsigned long val;
        int ret;
 
@@ -4261,8 +4419,7 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
 
        /* value is in KB */
        val <<= 10;
-
-       ret = tracing_resize_ring_buffer(tc->tr, val, tc->cpu);
+       ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
        if (ret < 0)
                return ret;
 
@@ -4320,6 +4477,8 @@ tracing_free_buffer_release(struct inode *inode, struct file *filp)
        /* resize the ring buffer to 0 */
        tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
 
+       trace_array_put(tr);
+
        return 0;
 }
 
@@ -4328,6 +4487,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
                                        size_t cnt, loff_t *fpos)
 {
        unsigned long addr = (unsigned long)ubuf;
+       struct trace_array *tr = filp->private_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        struct print_entry *entry;
@@ -4387,7 +4547,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
 
        local_save_flags(irq_flags);
        size = sizeof(*entry) + cnt + 2; /* possible \n added */
-       buffer = global_trace.trace_buffer.buffer;
+       buffer = tr->trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
                                          irq_flags, preempt_count());
        if (!event) {
@@ -4495,10 +4655,20 @@ static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
 
 static int tracing_clock_open(struct inode *inode, struct file *file)
 {
+       struct trace_array *tr = inode->i_private;
+       int ret;
+
        if (tracing_disabled)
                return -ENODEV;
 
-       return single_open(file, tracing_clock_show, inode->i_private);
+       if (trace_array_get(tr))
+               return -ENODEV;
+
+       ret = single_open(file, tracing_clock_show, inode->i_private);
+       if (ret < 0)
+               trace_array_put(tr);
+
+       return ret;
 }
 
 struct ftrace_buffer_info {
@@ -4511,30 +4681,40 @@ struct ftrace_buffer_info {
 static int tracing_snapshot_open(struct inode *inode, struct file *file)
 {
        struct trace_cpu *tc = inode->i_private;
+       struct trace_array *tr = tc->tr;
        struct trace_iterator *iter;
        struct seq_file *m;
        int ret = 0;
 
+       if (trace_array_get(tr) < 0)
+               return -ENODEV;
+
        if (file->f_mode & FMODE_READ) {
-               iter = __tracing_open(inode, file, true);
+               iter = __tracing_open(tr, tc, inode, file, true);
                if (IS_ERR(iter))
                        ret = PTR_ERR(iter);
        } else {
                /* Writes still need the seq_file to hold the private data */
+               ret = -ENOMEM;
                m = kzalloc(sizeof(*m), GFP_KERNEL);
                if (!m)
-                       return -ENOMEM;
+                       goto out;
                iter = kzalloc(sizeof(*iter), GFP_KERNEL);
                if (!iter) {
                        kfree(m);
-                       return -ENOMEM;
+                       goto out;
                }
-               iter->tr = tc->tr;
+               ret = 0;
+
+               iter->tr = tr;
                iter->trace_buffer = &tc->tr->max_buffer;
                iter->cpu_file = tc->cpu;
                m->private = iter;
                file->private_data = m;
        }
+out:
+       if (ret < 0)
+               trace_array_put(tr);
 
        return ret;
 }
@@ -4616,9 +4796,12 @@ out:
 static int tracing_snapshot_release(struct inode *inode, struct file *file)
 {
        struct seq_file *m = file->private_data;
+       int ret;
+
+       ret = tracing_release(inode, file);
 
        if (file->f_mode & FMODE_READ)
-               return tracing_release(inode, file);
+               return ret;
 
        /* If write only, the seq_file is just a stub */
        if (m)
@@ -4684,34 +4867,38 @@ static const struct file_operations tracing_pipe_fops = {
 };
 
 static const struct file_operations tracing_entries_fops = {
-       .open           = tracing_open_generic,
+       .open           = tracing_open_generic_tr,
        .read           = tracing_entries_read,
        .write          = tracing_entries_write,
        .llseek         = generic_file_llseek,
+       .release        = tracing_release_generic_tr,
 };
 
 static const struct file_operations tracing_total_entries_fops = {
-       .open           = tracing_open_generic,
+       .open           = tracing_open_generic_tr,
        .read           = tracing_total_entries_read,
        .llseek         = generic_file_llseek,
+       .release        = tracing_release_generic_tr,
 };
 
 static const struct file_operations tracing_free_buffer_fops = {
+       .open           = tracing_open_generic_tr,
        .write          = tracing_free_buffer_write,
        .release        = tracing_free_buffer_release,
 };
 
 static const struct file_operations tracing_mark_fops = {
-       .open           = tracing_open_generic,
+       .open           = tracing_open_generic_tr,
        .write          = tracing_mark_write,
        .llseek         = generic_file_llseek,
+       .release        = tracing_release_generic_tr,
 };
 
 static const struct file_operations trace_clock_fops = {
        .open           = tracing_clock_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
-       .release        = single_release,
+       .release        = tracing_single_release_tr,
        .write          = tracing_clock_write,
 };
 
@@ -4736,23 +4923,26 @@ static const struct file_operations snapshot_raw_fops = {
 
 static int tracing_buffers_open(struct inode *inode, struct file *filp)
 {
-       struct trace_cpu *tc = inode->i_private;
-       struct trace_array *tr = tc->tr;
+       struct trace_array *tr = inode->i_private;
        struct ftrace_buffer_info *info;
+       int ret;
 
        if (tracing_disabled)
                return -ENODEV;
 
+       if (trace_array_get(tr) < 0)
+               return -ENODEV;
+
        info = kzalloc(sizeof(*info), GFP_KERNEL);
-       if (!info)
+       if (!info) {
+               trace_array_put(tr);
                return -ENOMEM;
+       }
 
        mutex_lock(&trace_types_lock);
 
-       tr->ref++;
-
        info->iter.tr           = tr;
-       info->iter.cpu_file     = tc->cpu;
+       info->iter.cpu_file     = tracing_get_cpu(inode);
        info->iter.trace        = tr->current_trace;
        info->iter.trace_buffer = &tr->trace_buffer;
        info->spare             = NULL;
@@ -4763,7 +4953,11 @@ static int tracing_buffers_open(struct inode *inode, struct file *filp)
 
        mutex_unlock(&trace_types_lock);
 
-       return nonseekable_open(inode, filp);
+       ret = nonseekable_open(inode, filp);
+       if (ret < 0)
+               trace_array_put(tr);
+
+       return ret;
 }
 
 static unsigned int
@@ -4863,8 +5057,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
 
        mutex_lock(&trace_types_lock);
 
-       WARN_ON(!iter->tr->ref);
-       iter->tr->ref--;
+       __trace_array_put(iter->tr);
 
        if (info->spare)
                ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
@@ -5066,14 +5259,14 @@ static ssize_t
 tracing_stats_read(struct file *filp, char __user *ubuf,
                   size_t count, loff_t *ppos)
 {
-       struct trace_cpu *tc = filp->private_data;
-       struct trace_array *tr = tc->tr;
+       struct inode *inode = file_inode(filp);
+       struct trace_array *tr = inode->i_private;
        struct trace_buffer *trace_buf = &tr->trace_buffer;
+       int cpu = tracing_get_cpu(inode);
        struct trace_seq *s;
        unsigned long cnt;
        unsigned long long t;
        unsigned long usec_rem;
-       int cpu = tc->cpu;
 
        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
@@ -5126,9 +5319,10 @@ tracing_stats_read(struct file *filp, char __user *ubuf,
 }
 
 static const struct file_operations tracing_stats_fops = {
-       .open           = tracing_open_generic,
+       .open           = tracing_open_generic_tr,
        .read           = tracing_stats_read,
        .llseek         = generic_file_llseek,
+       .release        = tracing_release_generic_tr,
 };
 
 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -5317,6 +5511,17 @@ static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
        return tr->percpu_dir;
 }
 
+static struct dentry *
+trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
+                     void *data, long cpu, const struct file_operations *fops)
+{
+       struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
+
+       if (ret) /* See tracing_get_cpu() */
+               ret->d_inode->i_cdev = (void *)(cpu + 1);
+       return ret;
+}
+
 static void
 tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
 {
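
trace_create_cpu_file() and tracing_get_cpu() agree on a small encoding: the per-cpu file's CPU number is stashed in the inode's otherwise unused i_cdev pointer, offset by one so that CPU 0 does not look like a NULL i_cdev (which tracing_get_cpu() treats as RING_BUFFER_ALL_CPUS). A sketch of the round trip, for illustration only:

	/* Illustrative only: the +1/-1 offset keeps CPU 0 distinct from NULL. */
	static void example_cpu_encoding(struct inode *inode, long cpu)
	{
		inode->i_cdev = (void *)(cpu + 1);		/* as in trace_create_cpu_file() */
		WARN_ON(tracing_get_cpu(inode) != cpu);		/* decoded by tracing_get_cpu() */
	}
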
@@ -5336,28 +5541,28 @@ tracing_init_debugfs_percpu(struct trace_array *tr, long cpu)
        }
 
        /* per cpu trace_pipe */
-       trace_create_file("trace_pipe", 0444, d_cpu,
-                       (void *)&data->trace_cpu, &tracing_pipe_fops);
+       trace_create_cpu_file("trace_pipe", 0444, d_cpu,
+                               tr, cpu, &tracing_pipe_fops);
 
        /* per cpu trace */
-       trace_create_file("trace", 0644, d_cpu,
-                       (void *)&data->trace_cpu, &tracing_fops);
+       trace_create_cpu_file("trace", 0644, d_cpu,
+                               &data->trace_cpu, cpu, &tracing_fops);
 
-       trace_create_file("trace_pipe_raw", 0444, d_cpu,
-                       (void *)&data->trace_cpu, &tracing_buffers_fops);
+       trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
+                               tr, cpu, &tracing_buffers_fops);
 
-       trace_create_file("stats", 0444, d_cpu,
-                       (void *)&data->trace_cpu, &tracing_stats_fops);
+       trace_create_cpu_file("stats", 0444, d_cpu,
+                               tr, cpu, &tracing_stats_fops);
 
-       trace_create_file("buffer_size_kb", 0444, d_cpu,
-                       (void *)&data->trace_cpu, &tracing_entries_fops);
+       trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
+                               tr, cpu, &tracing_entries_fops);
 
 #ifdef CONFIG_TRACER_SNAPSHOT
-       trace_create_file("snapshot", 0644, d_cpu,
-                         (void *)&data->trace_cpu, &snapshot_fops);
+       trace_create_cpu_file("snapshot", 0644, d_cpu,
+                               &data->trace_cpu, cpu, &snapshot_fops);
 
-       trace_create_file("snapshot_raw", 0444, d_cpu,
-                       (void *)&data->trace_cpu, &snapshot_raw_fops);
+       trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
+                               tr, cpu, &snapshot_raw_fops);
 #endif
 }
 
@@ -5612,15 +5817,10 @@ rb_simple_read(struct file *filp, char __user *ubuf,
               size_t cnt, loff_t *ppos)
 {
        struct trace_array *tr = filp->private_data;
-       struct ring_buffer *buffer = tr->trace_buffer.buffer;
        char buf[64];
        int r;
 
-       if (buffer)
-               r = ring_buffer_record_is_on(buffer);
-       else
-               r = 0;
-
+       r = tracer_tracing_is_on(tr);
        r = sprintf(buf, "%d\n", r);
 
        return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
@@ -5642,11 +5842,11 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
        if (buffer) {
                mutex_lock(&trace_types_lock);
                if (val) {
-                       ring_buffer_record_on(buffer);
+                       tracer_tracing_on(tr);
                        if (tr->current_trace->start)
                                tr->current_trace->start(tr);
                } else {
-                       ring_buffer_record_off(buffer);
+                       tracer_tracing_off(tr);
                        if (tr->current_trace->stop)
                                tr->current_trace->stop(tr);
                }
@@ -5659,9 +5859,10 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
 }
 
 static const struct file_operations rb_simple_fops = {
-       .open           = tracing_open_generic,
+       .open           = tracing_open_generic_tr,
        .read           = rb_simple_read,
        .write          = rb_simple_write,
+       .release        = tracing_release_generic_tr,
        .llseek         = default_llseek,
 };
 
@@ -5775,8 +5976,10 @@ static int new_instance_create(const char *name)
                goto out_free_tr;
 
        ret = event_trace_add_tracer(tr->dir, tr);
-       if (ret)
+       if (ret) {
+               debugfs_remove_recursive(tr->dir);
                goto out_free_tr;
+       }
 
        init_tracer_debugfs(tr, tr->dir);
 
@@ -5925,15 +6128,15 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer)
                        (void *)&tr->trace_cpu, &tracing_fops);
 
        trace_create_file("trace_pipe", 0444, d_tracer,
-                       (void *)&tr->trace_cpu, &tracing_pipe_fops);
+                         tr, &tracing_pipe_fops);
 
        trace_create_file("buffer_size_kb", 0644, d_tracer,
-                       (void *)&tr->trace_cpu, &tracing_entries_fops);
+                         tr, &tracing_entries_fops);
 
        trace_create_file("buffer_total_size_kb", 0444, d_tracer,
                          tr, &tracing_total_entries_fops);
 
-       trace_create_file("free_buffer", 0644, d_tracer,
+       trace_create_file("free_buffer", 0200, d_tracer,
                          tr, &tracing_free_buffer_fops);
 
        trace_create_file("trace_marker", 0220, d_tracer,