ftrace: fix updates to max trace

This patch fixes several bugs in the updating of the max trace that
were introduced by the new buffering code:

 - reset the per-CPU trace data with tracing_reset() after its buffer
   has been copied into max_tr, so stale entries are not left behind

 - print a message for unknown entry types in the trace output instead
   of silently skipping them

 - set max_tr.entries after the allocation loop has finished growing
   global_trace.entries, so the two counts stay in sync

 - in the irqsoff tracer, serialize max-latency updates with a new
   max_trace_lock and re-check report_latency() under the lock, closing
   a race where two CPUs could update the max trace at the same time
   (this also makes the old T2 timestamp re-read unnecessary)

 - set tracer_enabled only after the ftrace function is registered, and
   clear it before unregistering, so the callback never runs with the
   tracer half set up
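
For reference, the irqsoff race being closed looks roughly like this
(a simplified sketch of check_critical_timing(), not the literal code):

	/*
	 * CPU A                           CPU B
	 * report_latency(delta) -> 1
	 *                                  report_latency(delta) -> 1
	 * write max trace snapshot
	 *                                  write max trace snapshot,
	 *                                  clobbering CPU A's update
	 *
	 * With max_trace_lock held, only one CPU at a time commits its
	 * snapshot; the other re-checks report_latency() against the
	 * updated tracing_max_latency and, if it no longer has the max,
	 * backs out via out_unlock.
	 */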

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9175ce9..9596656 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -153,6 +153,7 @@
 		memcpy(max_tr.data[i], data, sizeof(*data));
 		data->trace = save_trace;
 		data->trace_pages = save_pages;
+		tracing_reset(data);
 	}
 
 	__update_max_tr(tr, tsk, cpu);
@@ -183,6 +184,7 @@
 	memcpy(max_tr.data[cpu], data, sizeof(*data));
 	data->trace = save_trace;
 	data->trace_pages = save_pages;
+	tracing_reset(data);
 
 	__update_max_tr(tr, tsk, cpu);
 	spin_unlock(&ftrace_max_lock);
@@ -877,6 +879,8 @@
 			   entry->ctx.next_prio,
 			   comm);
 		break;
+	default:
+		seq_printf(m, "Unknown type %d\n", entry->type);
 	}
 }
 
@@ -1625,7 +1629,6 @@
 	 * round up a bit.
 	 */
 	global_trace.entries = ENTRIES_PER_PAGE;
-	max_tr.entries = global_trace.entries;
 	pages++;
 
 	while (global_trace.entries < trace_nr_entries) {
@@ -1633,6 +1636,7 @@
 			break;
 		pages++;
 	}
+	max_tr.entries = global_trace.entries;
 
 	pr_info("tracer: %d pages allocated for %ld",
 		pages, trace_nr_entries);
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index bd3f881..74165f6 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -23,6 +23,8 @@
 
 static DEFINE_PER_CPU(int, tracing_cpu);
 
+static DEFINE_SPINLOCK(max_trace_lock);
+
 enum {
 	TRACER_IRQS_OFF		= (1 << 1),
 	TRACER_PREEMPT_OFF	= (1 << 2),
@@ -126,7 +128,7 @@
 		      int cpu)
 {
 	unsigned long latency, t0, t1;
-	cycle_t T0, T1, T2, delta;
+	cycle_t T0, T1, delta;
 	unsigned long flags;
 
 	/*
@@ -142,20 +144,18 @@
 	if (!report_latency(delta))
 		goto out;
 
-	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
-	/*
-	 * Update the timestamp, because the trace entry above
-	 * might change it (it can only get larger so the latency
-	 * is fair to be reported):
-	 */
-	T2 = now(cpu);
+	spin_lock(&max_trace_lock);
 
-	delta = T2-T0;
+	/* check if we are still the max latency */
+	if (!report_latency(delta))
+		goto out_unlock;
+
+	ftrace(tr, data, CALLER_ADDR0, parent_ip, flags);
 
 	latency = nsecs_to_usecs(delta);
 
 	if (data->critical_sequence != max_sequence)
-		goto out;
+		goto out_unlock;
 
 	tracing_max_latency = delta;
 	t0 = nsecs_to_usecs(T0);
@@ -189,6 +189,9 @@
 
 	max_sequence++;
 
+out_unlock:
+	spin_unlock(&max_trace_lock);
+
 out:
 	data->critical_sequence = max_sequence;
 	data->preempt_timestamp = now(cpu);
@@ -366,14 +369,14 @@
 
 static void start_irqsoff_tracer(struct trace_array *tr)
 {
-	tracer_enabled = 1;
 	register_ftrace_function(&trace_ops);
+	tracer_enabled = 1;
 }
 
 static void stop_irqsoff_tracer(struct trace_array *tr)
 {
-	unregister_ftrace_function(&trace_ops);
 	tracer_enabled = 0;
+	unregister_ftrace_function(&trace_ops);
 }
 
 static void __irqsoff_tracer_init(struct trace_array *tr)