Clean up code around recording method trace events

There are three cleanups in this CL:
1. Merge streaming_lock_ and unique_methods_lock_. unique_methods_lock_
   guards the method->id map, but in streaming mode that map is only
   accessed while streaming_lock_ is already held, so the second lock
   adds nothing. In the non-streaming case streaming_lock_ is never
   needed at all. So merge the two into a single lock and rename it
   tracing_lock_ (see the sketch below).
2. Remove seen_methods_. The method->id map already records whether a
   method has been seen, so no additional bookkeeping is required.
3. Pass the TraceAction directly when logging a method trace event,
   instead of translating from the instrumentation event inside
   LogMethodTraceEvent.
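
A minimal standalone sketch of the resulting scheme (simplified for the
commit message: std::mutex instead of ART's Mutex, no thread-safety
annotations, and a stand-in ArtMethod type; the TraceAction values
follow the trace format's two action bits):

  #include <cstdint>
  #include <mutex>
  #include <unordered_map>
  #include <vector>

  struct ArtMethod {};  // stand-in for art::ArtMethod

  enum TraceAction : uint32_t {
    kTraceMethodEnter = 0x00,
    kTraceMethodExit = 0x01,
    kTraceUnroll = 0x02,
  };
  constexpr uint32_t kTraceActionBits = 2;

  // Single lock replacing streaming_lock_ and unique_methods_lock_.
  std::mutex tracing_lock;
  std::unordered_map<ArtMethod*, uint32_t> method_id_map;  // method -> id
  std::vector<ArtMethod*> unique_methods;                  // id -> method

  // Assigns an id on first use. Caller holds tracing_lock (the real
  // code enforces this with REQUIRES(tracing_lock_)).
  uint32_t EncodeTraceMethod(ArtMethod* method) {
    auto it = method_id_map.find(method);
    if (it != method_id_map.end()) {
      return it->second;
    }
    uint32_t idx = static_cast<uint32_t>(unique_methods.size());
    method_id_map.emplace(method, idx);
    unique_methods.push_back(method);
    return idx;
  }

  // "Newly seen" is now just a map lookup; no per-DexFile bitsets.
  bool RegisterMethod(ArtMethod* method) {
    return method_id_map.find(method) == method_id_map.end();
  }

  // Callers pass the TraceAction directly, so no
  // InstrumentationEvent -> TraceAction switch is needed when encoding.
  uint32_t EncodeEventWord(ArtMethod* method, TraceAction action) {
    std::lock_guard<std::mutex> lock(tracing_lock);
    return (EncodeTraceMethod(method) << kTraceActionBits) | action;
  }

In the actual change the same invariants are expressed statically with
GUARDED_BY(tracing_lock_) / REQUIRES(tracing_lock_) annotations rather
than comments.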

Bug: 259258187
Test: art/test.py
Change-Id: I240d2698ae0f72ad99b0b362022655e13059f16e
diff --git a/runtime/trace.cc b/runtime/trace.cc
index 4e39eb3..8516aea 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -83,12 +83,10 @@
 }
 
 ArtMethod* Trace::DecodeTraceMethod(uint32_t tmid) {
-  MutexLock mu(Thread::Current(), *unique_methods_lock_);
   return unique_methods_[tmid >> TraceActionBits];
 }
 
 uint32_t Trace::EncodeTraceMethod(ArtMethod* method) {
-  MutexLock mu(Thread::Current(), *unique_methods_lock_);
   uint32_t idx;
   auto it = art_method_id_map_.find(method);
   if (it != art_method_id_map_.end()) {
@@ -103,12 +101,6 @@
   return idx;
 }
 
-uint32_t Trace::EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action) {
-  uint32_t tmid = (EncodeTraceMethod(method) << TraceActionBits) | action;
-  DCHECK_EQ(method, DecodeTraceMethod(tmid));
-  return tmid;
-}
-
 std::vector<ArtMethod*>* Trace::AllocStackTrace() {
   return (temp_stack_trace_.get() != nullptr)  ? temp_stack_trace_.release() :
       new std::vector<ArtMethod*>();
@@ -243,8 +235,7 @@
     // If there's no previous stack trace sample for this thread, log an entry event for all
     // methods in the trace.
     for (auto rit = stack_trace->rbegin(); rit != stack_trace->rend(); ++rit) {
-      LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered,
-                          thread_clock_diff, wall_clock_diff);
+      LogMethodTraceEvent(thread, *rit, kTraceMethodEnter, thread_clock_diff, wall_clock_diff);
     }
   } else {
     // If there's a previous stack trace for this thread, diff the traces and emit entry and exit
@@ -258,13 +249,11 @@
     }
     // Iterate top-down over the old trace until the point where they differ, emitting exit events.
     for (auto old_it = old_stack_trace->begin(); old_it != old_rit.base(); ++old_it) {
-      LogMethodTraceEvent(thread, *old_it, instrumentation::Instrumentation::kMethodExited,
-                          thread_clock_diff, wall_clock_diff);
+      LogMethodTraceEvent(thread, *old_it, kTraceMethodExit, thread_clock_diff, wall_clock_diff);
     }
     // Iterate bottom-up over the new trace from the point where they differ, emitting entry events.
     for (; rit != stack_trace->rend(); ++rit) {
-      LogMethodTraceEvent(thread, *rit, instrumentation::Instrumentation::kMethodEntered,
-                          thread_clock_diff, wall_clock_diff);
+      LogMethodTraceEvent(thread, *rit, kTraceMethodEnter, thread_clock_diff, wall_clock_diff);
     }
     FreeStackTrace(old_stack_trace);
   }
@@ -600,8 +589,7 @@
       clock_overhead_ns_(GetClockOverheadNanoSeconds()),
       overflow_(false),
       interval_us_(0),
-      streaming_lock_(nullptr),
-      unique_methods_lock_(new Mutex("unique methods lock", kTracingUniqueMethodsLock)) {
+      tracing_lock_("tracing lock", LockLevel::kTracingStreamingLock) {
   CHECK_IMPLIES(trace_file == nullptr, output_mode == TraceOutputMode::kDDMS);
 
   uint16_t trace_version = GetTraceVersion(clock_source_);
@@ -623,7 +611,6 @@
   cur_offset_.store(kTraceHeaderLength, std::memory_order_relaxed);
 
   if (output_mode == TraceOutputMode::kStreaming) {
-    streaming_lock_ = new Mutex("tracing lock", LockLevel::kTracingStreamingLock);
     // Flush the header information to the file. We use a per thread buffer, so
     // it is easier to just write the header information directly to file.
     if (!trace_file_->WriteFully(buf_.get(), kTraceHeaderLength)) {
@@ -633,11 +620,6 @@
   }
 }
 
-Trace::~Trace() {
-  delete streaming_lock_;
-  delete unique_methods_lock_;
-}
-
 static uint64_t ReadBytes(uint8_t* buf, size_t bytes) {
   uint64_t ret = 0;
   for (size_t i = 0; i < bytes; ++i) {
@@ -650,6 +632,7 @@
   uint8_t* ptr = buf + kTraceHeaderLength;
   uint8_t* end = buf + buf_size;
 
+  MutexLock mu(Thread::Current(), tracing_lock_);
   while (ptr < end) {
     uint32_t tmid = ReadBytes(ptr + 2, sizeof(tmid));
     ArtMethod* method = DecodeTraceMethod(tmid);
@@ -662,11 +645,7 @@
 void Trace::FinishTracing() {
   size_t final_offset = 0;
   std::set<ArtMethod*> visited_methods;
-  if (trace_output_mode_ == TraceOutputMode::kStreaming) {
-    // Clean up.
-    MutexLock mu(Thread::Current(), *streaming_lock_);
-    STLDeleteValues(&seen_methods_);
-  } else {
+  if (trace_output_mode_ != TraceOutputMode::kStreaming) {
     final_offset = cur_offset_.load(std::memory_order_relaxed);
     GetVisitedMethods(final_offset, &visited_methods);
   }
@@ -793,8 +772,7 @@
   uint32_t thread_clock_diff = 0;
   uint32_t wall_clock_diff = 0;
   ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
-  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodEntered,
-                      thread_clock_diff, wall_clock_diff);
+  LogMethodTraceEvent(thread, method, kTraceMethodEnter, thread_clock_diff, wall_clock_diff);
 }
 
 void Trace::MethodExited(Thread* thread,
@@ -806,7 +784,7 @@
   ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
   LogMethodTraceEvent(thread,
                       method,
-                      instrumentation::Instrumentation::kMethodExited,
+                      kTraceMethodExit,
                       thread_clock_diff,
                       wall_clock_diff);
 }
@@ -817,8 +795,7 @@
   uint32_t thread_clock_diff = 0;
   uint32_t wall_clock_diff = 0;
   ReadClocks(thread, &thread_clock_diff, &wall_clock_diff);
-  LogMethodTraceEvent(thread, method, instrumentation::Instrumentation::kMethodUnwind,
-                      thread_clock_diff, wall_clock_diff);
+  LogMethodTraceEvent(thread, method, kTraceUnroll, thread_clock_diff, wall_clock_diff);
 }
 
 void Trace::ExceptionThrown(Thread* thread ATTRIBUTE_UNUSED,
@@ -861,16 +838,7 @@
 }
 
 bool Trace::RegisterMethod(ArtMethod* method) {
-  const DexFile* dex_file = method->GetDexFile();
-  if (seen_methods_.find(dex_file) == seen_methods_.end()) {
-    seen_methods_.insert(std::make_pair(dex_file, new DexIndexBitSet()));
-  }
-  DexIndexBitSet* bit_set = seen_methods_.find(dex_file)->second;
-  if (!(*bit_set)[method->GetDexMethodIndex()]) {
-    bit_set->set(method->GetDexMethodIndex());
-    return true;
-  }
-  return false;
+  return art_method_id_map_.find(method) == art_method_id_map_.end();
 }
 
 std::string Trace::GetMethodLine(ArtMethod* method) {
@@ -911,7 +879,7 @@
     Append2LE(header + 5, static_cast<uint16_t>(thread_name.length()));
 
     {
-      MutexLock mu(Thread::Current(), *streaming_lock_);
+      MutexLock mu(Thread::Current(), tracing_lock_);
       if (!trace_file_->WriteFully(header, kThreadNameHeaderSize) ||
           !trace_file_->WriteFully(reinterpret_cast<const uint8_t*>(thread_name.c_str()),
                                    thread_name.length())) {
@@ -970,10 +938,10 @@
 }
 
 void Trace::FlushStreamingBuffer(Thread* thread) {
-  // Take a streaming_lock_ to serialize writes across threads. We also need to allocate a unique
+  // Take a tracing_lock_ to serialize writes across threads. We also need to allocate a unique
   // method id for each method. We do that by maintaining a map from id to method for each newly
-  // seen method (see RegisterMethod). streaming_lock_ also is required to serialize these.
-  MutexLock mu(Thread::Current(), *streaming_lock_);
+  // seen method (see RegisterMethod). tracing_lock_ is also required to serialize these.
+  MutexLock mu(Thread::Current(), tracing_lock_);
   uintptr_t* method_trace_buffer = thread->GetMethodTraceBuffer();
   // Create a temporary buffer to encode the trace events from the specified thread.
   size_t buffer_size = kPerThreadBufSize;
@@ -1059,33 +1027,19 @@
   // the buffer memory.
   uint8_t* ptr;
   ptr = buf_.get() + old_offset;
+  MutexLock mu(Thread::Current(), tracing_lock_);
   EncodeEventEntry(ptr, thread, method, action, thread_clock_diff, wall_clock_diff);
 }
 
 void Trace::LogMethodTraceEvent(Thread* thread,
                                 ArtMethod* method,
-                                instrumentation::Instrumentation::InstrumentationEvent event,
+                                TraceAction action,
                                 uint32_t thread_clock_diff,
                                 uint32_t wall_clock_diff) {
   // This method is called in both tracing modes (method and sampling). In sampling mode, this
   // method is only called by the sampling thread. In method tracing mode, it can be called
   // concurrently.
 
-  TraceAction action = kTraceMethodEnter;
-  switch (event) {
-    case instrumentation::Instrumentation::kMethodEntered:
-      action = kTraceMethodEnter;
-      break;
-    case instrumentation::Instrumentation::kMethodExited:
-      action = kTraceMethodExit;
-      break;
-    case instrumentation::Instrumentation::kMethodUnwind:
-      action = kTraceUnroll;
-      break;
-    default:
-      UNIMPLEMENTED(FATAL) << "Unexpected event: " << event;
-  }
-
   // Ensure we always use the non-obsolete version of the method so that entry/exit events have the
   // same pointer value.
   method = method->GetNonObsoleteMethod();
@@ -1104,7 +1058,8 @@
                              uint32_t thread_clock_diff,
                              uint32_t wall_clock_diff) {
   static constexpr size_t kPacketSize = 14U;  // The maximum size of data in a packet.
-  uint32_t method_value = EncodeTraceMethodAndAction(method, action);
+  uint32_t method_value = (EncodeTraceMethod(method) << TraceActionBits) | action;
+  DCHECK_EQ(method, DecodeTraceMethod(method_value));
   Append2LE(ptr, thread->GetTid());
   Append4LE(ptr + 2, method_value);
   ptr += 6;
@@ -1138,6 +1093,7 @@
   uint8_t* ptr = buf_.get() + kTraceHeaderLength;
   uint8_t* end = buf_.get() + buf_size;
 
+  MutexLock mu(Thread::Current(), tracing_lock_);
   while (ptr < end) {
     uint32_t tmid = ReadBytes(ptr + 2, sizeof(tmid));
     ArtMethod* method = DecodeTraceMethod(tmid);
@@ -1147,6 +1103,7 @@
 }
 
 void Trace::DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods) {
+  MutexLock mu(Thread::Current(), tracing_lock_);
   for (const auto& method : visited_methods) {
     os << GetMethodLine(method);
   }
diff --git a/runtime/trace.h b/runtime/trace.h
index 344c783..8858040 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -29,6 +29,7 @@
 #include "base/atomic.h"
 #include "base/locks.h"
 #include "base/macros.h"
+#include "base/mutex.h"
 #include "base/os.h"
 #include "base/safe_map.h"
 #include "instrumentation.h"
@@ -43,7 +44,6 @@
 class ArtField;
 class ArtMethod;
 class DexFile;
-class LOCKABLE Mutex;
 class ShadowFrame;
 class Thread;
 
@@ -120,8 +120,6 @@
     kSampling
   };
 
-  ~Trace();
-
   static void SetDefaultClockSource(TraceClockSource clock_source);
 
   static void Start(const char* trace_filename,
@@ -175,51 +173,51 @@
   uint32_t GetClockOverheadNanoSeconds();
 
   void CompareAndUpdateStackTrace(Thread* thread, std::vector<ArtMethod*>* stack_trace)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!tracing_lock_);
 
   // InstrumentationListener implementation.
   void MethodEntered(Thread* thread, ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!unique_methods_lock_, !streaming_lock_) override;
+      REQUIRES(!tracing_lock_) override;
   void MethodExited(Thread* thread,
                     ArtMethod* method,
                     instrumentation::OptionalFrame frame,
                     JValue& return_value)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!tracing_lock_)
       override;
   void MethodUnwind(Thread* thread,
                     ArtMethod* method,
                     uint32_t dex_pc)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!tracing_lock_)
       override;
   void DexPcMoved(Thread* thread,
                   Handle<mirror::Object> this_object,
                   ArtMethod* method,
                   uint32_t new_dex_pc)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_)
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!tracing_lock_)
       override;
   void FieldRead(Thread* thread,
                  Handle<mirror::Object> this_object,
                  ArtMethod* method,
                  uint32_t dex_pc,
                  ArtField* field)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!tracing_lock_) override;
   void FieldWritten(Thread* thread,
                     Handle<mirror::Object> this_object,
                     ArtMethod* method,
                     uint32_t dex_pc,
                     ArtField* field,
                     const JValue& field_value)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!tracing_lock_) override;
   void ExceptionThrown(Thread* thread,
                        Handle<mirror::Throwable> exception_object)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!tracing_lock_) override;
   void ExceptionHandled(Thread* thread, Handle<mirror::Throwable> exception_object)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!tracing_lock_) override;
   void Branch(Thread* thread,
               ArtMethod* method,
               uint32_t dex_pc,
               int32_t dex_pc_offset)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_) override;
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!tracing_lock_) override;
   void WatchedFramePop(Thread* thread, const ShadowFrame& frame)
       REQUIRES_SHARED(Locks::mutator_lock_) override;
   // Reuse an old stack trace if it exists, otherwise allocate a new one.
@@ -256,34 +254,33 @@
       // how to annotate this.
       NO_THREAD_SAFETY_ANALYSIS;
   void FinishTracing()
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!tracing_lock_);
 
   void ReadClocks(Thread* thread, uint32_t* thread_clock_diff, uint32_t* wall_clock_diff);
 
-  void LogMethodTraceEvent(Thread* thread, ArtMethod* method,
-                           instrumentation::Instrumentation::InstrumentationEvent event,
+  void LogMethodTraceEvent(Thread* thread, ArtMethod* method, TraceAction action,
                            uint32_t thread_clock_diff, uint32_t wall_clock_diff)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_, !streaming_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!tracing_lock_);
 
   // Methods to output traced methods and threads.
   void GetVisitedMethods(size_t end_offset, std::set<ArtMethod*>* visited_methods)
-      REQUIRES(!unique_methods_lock_);
+      REQUIRES(!tracing_lock_);
   void DumpMethodList(std::ostream& os, const std::set<ArtMethod*>& visited_methods)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!tracing_lock_);
   void DumpThreadList(std::ostream& os) REQUIRES(!Locks::thread_list_lock_);
 
   // Methods to register seen entitites in streaming mode. The methods return true if the entity
   // is newly discovered.
   bool RegisterMethod(ArtMethod* method)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(streaming_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(tracing_lock_);
   bool RegisterThread(Thread* thread)
-      REQUIRES(streaming_lock_);
+      REQUIRES(tracing_lock_);
 
   void RecordMethodEvent(Thread* thread,
                          ArtMethod* method,
                          TraceAction action,
                          uint32_t thread_clock_diff,
-                         uint32_t wall_clock_diff) REQUIRES(!unique_methods_lock_);
+                         uint32_t wall_clock_diff) REQUIRES(!tracing_lock_);
 
   // Encodes event in non-streaming mode. This assumes that there is enough space reserved to
   // encode the entry.
@@ -292,7 +289,7 @@
                         ArtMethod* method,
                         TraceAction action,
                         uint32_t thread_clock_diff,
-                        uint32_t wall_clock_diff) REQUIRES(!unique_methods_lock_);
+                        uint32_t wall_clock_diff) REQUIRES(tracing_lock_);
 
   // These methods are used to encode events in streaming mode.
 
@@ -304,14 +301,14 @@
                                   TraceAction action,
                                   uint32_t thread_clock_diff,
                                   uint32_t wall_clock_diff) REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!unique_methods_lock_) REQUIRES(!streaming_lock_);
+      REQUIRES(!tracing_lock_);
   // This encodes all the events in the per-thread trace buffer and writes it to the trace file.
   // This acquires streaming lock to prevent any other threads writing concurrently. It is required
   // to serialize these since each method is encoded with a unique id which is assigned when the
   // method is seen for the first time in the recoreded events. So we need to serialize these
   // flushes across threads.
   void FlushStreamingBuffer(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
-      REQUIRES(!unique_methods_lock_) REQUIRES(!streaming_lock_);
+      REQUIRES(!tracing_lock_);
   // Ensures there is sufficient space in the buffer to record the requested_size. If there is not
   // enough sufficient space the current contents of the buffer are written to the file and
   // current_index is reset to 0. This doesn't check if buffer_size is big enough to hold the
@@ -329,15 +326,13 @@
                   uint8_t* buffer,
                   size_t buffer_size);
 
-  uint32_t EncodeTraceMethod(ArtMethod* method) REQUIRES(!unique_methods_lock_);
-  uint32_t EncodeTraceMethodAndAction(ArtMethod* method, TraceAction action)
-      REQUIRES(!unique_methods_lock_);
-  ArtMethod* DecodeTraceMethod(uint32_t tmid) REQUIRES(!unique_methods_lock_);
-  std::string GetMethodLine(ArtMethod* method) REQUIRES(!unique_methods_lock_)
+  uint32_t EncodeTraceMethod(ArtMethod* method) REQUIRES(tracing_lock_);
+  ArtMethod* DecodeTraceMethod(uint32_t tmid) REQUIRES(tracing_lock_);
+  std::string GetMethodLine(ArtMethod* method) REQUIRES(tracing_lock_)
       REQUIRES_SHARED(Locks::mutator_lock_);
 
   void DumpBuf(uint8_t* buf, size_t buf_size, TraceClockSource clock_source)
-      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!unique_methods_lock_);
+      REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!tracing_lock_);
 
   // Singleton instance of the Trace or null when no method tracing is active.
   static Trace* volatile the_trace_ GUARDED_BY(Locks::trace_lock_);
@@ -355,7 +350,7 @@
   std::unique_ptr<File> trace_file_;
 
   // Buffer to store trace data. In streaming mode, this is protected
-  // by the streaming_lock_. In non-streaming mode, reserved regions
+  // by the tracing_lock_. In non-streaming mode, reserved regions
   // are atomically allocated (using cur_offset_) for log entries to
   // be written.
   std::unique_ptr<uint8_t[]> buf_;
@@ -384,7 +379,7 @@
   // to concurrently reserve space in the buffer. The newly written
   // buffer contents are not read without some other form of thread
   // synchronization, such as suspending all potential writers or
-  // acquiring *streaming_lock_. Reading cur_offset_ is thus never
+  // acquiring *tracing_lock_. Reading cur_offset_ is thus never
   // used to ensure visibility of any other objects, and all accesses
   // are memory_order_relaxed.
   //
@@ -413,14 +408,12 @@
   int interval_us_;
 
   // Streaming mode data.
-  Mutex* streaming_lock_;
-  std::map<const DexFile*, DexIndexBitSet*> seen_methods_ GUARDED_BY(streaming_lock_);
+  Mutex tracing_lock_;
 
   // Bijective map from ArtMethod* to index.
   // Map from ArtMethod* to index in unique_methods_;
-  Mutex* unique_methods_lock_ ACQUIRED_AFTER(streaming_lock_);
-  std::unordered_map<ArtMethod*, uint32_t> art_method_id_map_ GUARDED_BY(unique_methods_lock_);
-  std::vector<ArtMethod*> unique_methods_ GUARDED_BY(unique_methods_lock_);
+  std::unordered_map<ArtMethod*, uint32_t> art_method_id_map_ GUARDED_BY(tracing_lock_);
+  std::vector<ArtMethod*> unique_methods_ GUARDED_BY(tracing_lock_);
 
   DISALLOW_COPY_AND_ASSIGN(Trace);
 };