perf session: Move the hist_entries rb tree to perf_session

We'll need to sort multiple times, once per perf session, so that we
can then do a diff between sessions.
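With this change the tree and the callchain flag live in struct
perf_session, so callers pass the session explicitly instead of
touching the old 'hist' global. A minimal caller sketch (hypothetical,
not part of this patch; the event-resolution code and the 'session',
'sample_count' and 'total_samples' variables are assumed):

	struct addr_location al;
	struct symbol *parent = NULL;
	struct hist_entry *he;
	bool hit;

	/* 'al' and 'parent' resolved from the sample event beforehand */
	he = __perf_session__add_hist_entry(session, &al, parent,
					    sample_count, &hit);
	if (he == NULL)
		return -ENOMEM;

	/* ... once all samples have been added ... */
	perf_session__collapse_resort(session);
	perf_session__output_resort(session, total_samples);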

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260803439-16783-1-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index b40e37d..b9828fc 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1,7 +1,6 @@
 #include "hist.h"
-
-struct rb_root hist;
-int callchain;
+#include "session.h"
+#include "sort.h"
 
 struct callchain_param	callchain_param = {
 	.mode	= CHAIN_GRAPH_REL,
@@ -12,11 +11,12 @@
  * histogram, sorted on item, collects counts
  */
 
-struct hist_entry *__hist_entry__add(struct addr_location *al,
-				     struct symbol *sym_parent,
-				     u64 count, bool *hit)
+struct hist_entry *__perf_session__add_hist_entry(struct perf_session *self,
+						  struct addr_location *al,
+						  struct symbol *sym_parent,
+						  u64 count, bool *hit)
 {
-	struct rb_node **p = &hist.rb_node;
+	struct rb_node **p = &self->hists.rb_node;
 	struct rb_node *parent = NULL;
 	struct hist_entry *he;
 	struct hist_entry entry = {
@@ -52,7 +52,7 @@
 		return NULL;
 	*he = entry;
 	rb_link_node(&he->rb_node, parent, p);
-	rb_insert_color(&he->rb_node, &hist);
+	rb_insert_color(&he->rb_node, &self->hists);
 	*hit = false;
 	return he;
 }
@@ -129,7 +129,7 @@
 	rb_insert_color(&he->rb_node, root);
 }
 
-void collapse__resort(void)
+void perf_session__collapse_resort(struct perf_session *self)
 {
 	struct rb_root tmp;
 	struct rb_node *next;
@@ -139,31 +139,33 @@
 		return;
 
 	tmp = RB_ROOT;
-	next = rb_first(&hist);
+	next = rb_first(&self->hists);
 
 	while (next) {
 		n = rb_entry(next, struct hist_entry, rb_node);
 		next = rb_next(&n->rb_node);
 
-		rb_erase(&n->rb_node, &hist);
+		rb_erase(&n->rb_node, &self->hists);
 		collapse__insert_entry(&tmp, n);
 	}
 
-	hist = tmp;
+	self->hists = tmp;
 }
 
 /*
  * reverse the map, sort on count.
  */
 
-static void output__insert_entry(struct rb_root *root, struct hist_entry *he,
-				 u64 min_callchain_hits)
+static void perf_session__insert_output_hist_entry(struct perf_session *self,
+						   struct rb_root *root,
+						   struct hist_entry *he,
+						   u64 min_callchain_hits)
 {
 	struct rb_node **p = &root->rb_node;
 	struct rb_node *parent = NULL;
 	struct hist_entry *iter;
 
-	if (callchain)
+	if (self->use_callchain)
 		callchain_param.sort(&he->sorted_chain, &he->callchain,
 				      min_callchain_hits, &callchain_param);
 
@@ -181,7 +183,7 @@
 	rb_insert_color(&he->rb_node, root);
 }
 
-void output__resort(u64 total_samples)
+void perf_session__output_resort(struct perf_session *self, u64 total_samples)
 {
 	struct rb_root tmp;
 	struct rb_node *next;
@@ -192,15 +194,16 @@
 		total_samples * (callchain_param.min_percent / 100);
 
 	tmp = RB_ROOT;
-	next = rb_first(&hist);
+	next = rb_first(&self->hists);
 
 	while (next) {
 		n = rb_entry(next, struct hist_entry, rb_node);
 		next = rb_next(&n->rb_node);
 
-		rb_erase(&n->rb_node, &hist);
-		output__insert_entry(&tmp, n, min_callchain_hits);
+		rb_erase(&n->rb_node, &self->hists);
+		perf_session__insert_output_hist_entry(self, &tmp, n,
+						       min_callchain_hits);
 	}
 
-	hist = tmp;
+	self->hists = tmp;
 }