Remove Dbg::VisitRoots().

The debugger's strong GC roots (the methods referenced by breakpoints and
deoptimization requests) are now stored as JNI method IDs (jmethodID) instead
of raw mirror::ArtMethod* pointers, so they no longer need to be visited as
roots.

jdwp_test passes.
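
For reference, the pattern used by the new Breakpoint and DeoptimizationRequest
code is to encode the method as a jmethodID when it is stored and decode it
back on demand (ScopedObjectAccessUnchecked::EncodeMethod / DecodeMethod), so
nothing has to be reported to the GC. A rough plain-JNI analogue of the same
idea, using hypothetical helper and struct names rather than the runtime's
actual code, would look like:

  // Sketch only: a plain-JNI analogue of caching a method as a jmethodID
  // instead of a raw object pointer. A jmethodID stays valid across GC
  // without being visited as a root.
  #include <cstdint>
  #include <jni.h>

  struct CachedLocation {   // hypothetical stand-in for Breakpoint
    jmethodID method;       // VM-managed handle, no root visiting needed
    uint32_t dex_pc;
  };

  // Encode once, when the location is recorded.
  CachedLocation EncodeLocation(JNIEnv* env, jclass klass, const char* name,
                                const char* sig, uint32_t dex_pc) {
    return CachedLocation{env->GetMethodID(klass, name, sig), dex_pc};
  }

  // Decode on demand, e.g. when the location needs to be inspected again.
  jobject DecodeLocation(JNIEnv* env, jclass klass, const CachedLocation& loc) {
    return env->ToReflectedMethod(klass, loc.method, JNI_FALSE);
  }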

Bug: 12687968
Change-Id: I841f1aaa5dffa784ca25953a251600f70df273b2
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index 11415b4..6161aff 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -170,27 +170,47 @@
   AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];  // Unused entries have NULL method.
 };
 
-struct Breakpoint {
+class Breakpoint {
+ public:
+  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+    : method_(nullptr), dex_pc_(dex_pc), need_full_deoptimization_(need_full_deoptimization) {
+    ScopedObjectAccessUnchecked soa(Thread::Current());
+    method_ = soa.EncodeMethod(method);
+  }
+
+  Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+    : method_(nullptr), dex_pc_(other.dex_pc_),
+      need_full_deoptimization_(other.need_full_deoptimization_) {
+    ScopedObjectAccessUnchecked soa(Thread::Current());
+    method_ = soa.EncodeMethod(other.Method());
+  }
+
+  mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    ScopedObjectAccessUnchecked soa(Thread::Current());
+    return soa.DecodeMethod(method_);
+  }
+
+  uint32_t DexPc() const {
+    return dex_pc_;
+  }
+
+  bool NeedFullDeoptimization() const {
+    return need_full_deoptimization_;
+  }
+
+ private:
   // The location of this breakpoint.
-  mirror::ArtMethod* method;
-  uint32_t dex_pc;
+  jmethodID method_;
+  uint32_t dex_pc_;
 
   // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
-  bool need_full_deoptimization;
-
-  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
-    : method(method), dex_pc(dex_pc), need_full_deoptimization(need_full_deoptimization) {}
-
-  void VisitRoots(RootCallback* callback, void* arg) {
-    if (method != nullptr) {
-      callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootDebugger);
-    }
-  }
+  bool need_full_deoptimization_;
 };
 
-static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
+static std::ostream& operator<<(std::ostream& os, Breakpoint& rhs)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.method).c_str(), rhs.dex_pc);
+  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
   return os;
 }
 
@@ -349,18 +369,12 @@
   dex_pcs.clear();
 }
 
-void DeoptimizationRequest::VisitRoots(RootCallback* callback, void* arg) {
-  if (method != nullptr) {
-    callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootDebugger);
-  }
-}
-
 static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
     LOCKS_EXCLUDED(Locks::breakpoint_lock_)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
   for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
-    if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == dex_pc) {
+    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
       VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
       return true;
     }
@@ -641,21 +655,6 @@
   }
 }
 
-void Dbg::VisitRoots(RootCallback* callback, void* arg) {
-  {
-    MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
-    for (Breakpoint& bp : gBreakpoints) {
-      bp.VisitRoots(callback, arg);
-    }
-  }
-  if (deoptimization_lock_ != nullptr) {  // only true if the debugger is started.
-    MutexLock mu(Thread::Current(), *deoptimization_lock_);
-    for (DeoptimizationRequest& req : deoptimization_requests_) {
-      req.VisitRoots(callback, arg);
-    }
-  }
-}
-
 void Dbg::StopJdwp() {
   // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
   Disposed();
@@ -2844,22 +2843,22 @@
 // Process request while all mutator threads are suspended.
 void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
   instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
-  switch (request.kind) {
+  switch (request.GetKind()) {
     case DeoptimizationRequest::kNothing:
       LOG(WARNING) << "Ignoring empty deoptimization request.";
       break;
     case DeoptimizationRequest::kRegisterForEvent:
       VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
-                                 request.instrumentation_event);
-      instrumentation->AddListener(&gDebugInstrumentationListener, request.instrumentation_event);
-      instrumentation_events_ |= request.instrumentation_event;
+                                 request.InstrumentationEvent());
+      instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
+      instrumentation_events_ |= request.InstrumentationEvent();
       break;
     case DeoptimizationRequest::kUnregisterForEvent:
       VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
-                                 request.instrumentation_event);
+                                 request.InstrumentationEvent());
       instrumentation->RemoveListener(&gDebugInstrumentationListener,
-                                      request.instrumentation_event);
-      instrumentation_events_ &= ~request.instrumentation_event;
+                                      request.InstrumentationEvent());
+      instrumentation_events_ &= ~request.InstrumentationEvent();
       break;
     case DeoptimizationRequest::kFullDeoptimization:
       VLOG(jdwp) << "Deoptimize the world ...";
@@ -2872,17 +2871,17 @@
       VLOG(jdwp) << "Undeoptimize the world DONE";
       break;
     case DeoptimizationRequest::kSelectiveDeoptimization:
-      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.method) << " ...";
-      instrumentation->Deoptimize(request.method);
-      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.method) << " DONE";
+      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
+      instrumentation->Deoptimize(request.Method());
+      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
       break;
     case DeoptimizationRequest::kSelectiveUndeoptimization:
-      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.method) << " ...";
-      instrumentation->Undeoptimize(request.method);
-      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.method) << " DONE";
+      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
+      instrumentation->Undeoptimize(request.Method());
+      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
       break;
     default:
-      LOG(FATAL) << "Unsupported deoptimization request kind " << request.kind;
+      LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
       break;
   }
 }
@@ -2899,8 +2898,8 @@
     MutexLock mu(Thread::Current(), *deoptimization_lock_);
     while (delayed_full_undeoptimization_count_ > 0) {
       DeoptimizationRequest req;
-      req.kind = DeoptimizationRequest::kFullUndeoptimization;
-      req.method = nullptr;
+      req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
+      req.SetMethod(nullptr);
       RequestDeoptimizationLocked(req);
       --delayed_full_undeoptimization_count_;
     }
@@ -2909,7 +2908,7 @@
 }
 
 void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
-  if (req.kind == DeoptimizationRequest::kNothing) {
+  if (req.GetKind() == DeoptimizationRequest::kNothing) {
     // Nothing to do.
     return;
   }
@@ -2918,35 +2917,35 @@
 }
 
 void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
-  switch (req.kind) {
+  switch (req.GetKind()) {
     case DeoptimizationRequest::kRegisterForEvent: {
-      DCHECK_NE(req.instrumentation_event, 0u);
-      size_t* counter = GetReferenceCounterForEvent(req.instrumentation_event);
+      DCHECK_NE(req.InstrumentationEvent(), 0u);
+      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
       CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
-                                                req.instrumentation_event);
+                                                req.InstrumentationEvent());
       if (*counter == 0) {
         VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x",
-                                   deoptimization_requests_.size(), req.instrumentation_event);
+                                   deoptimization_requests_.size(), req.InstrumentationEvent());
         deoptimization_requests_.push_back(req);
       }
       *counter = *counter + 1;
       break;
     }
     case DeoptimizationRequest::kUnregisterForEvent: {
-      DCHECK_NE(req.instrumentation_event, 0u);
-      size_t* counter = GetReferenceCounterForEvent(req.instrumentation_event);
+      DCHECK_NE(req.InstrumentationEvent(), 0u);
+      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
       CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
-                                                req.instrumentation_event);
+                                                req.InstrumentationEvent());
       *counter = *counter - 1;
       if (*counter == 0) {
         VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x",
-                                   deoptimization_requests_.size(), req.instrumentation_event);
+                                   deoptimization_requests_.size(), req.InstrumentationEvent());
         deoptimization_requests_.push_back(req);
       }
       break;
     }
     case DeoptimizationRequest::kFullDeoptimization: {
-      DCHECK(req.method == nullptr);
+      DCHECK(req.Method() == nullptr);
       if (full_deoptimization_event_count_ == 0) {
         VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
                    << " for full deoptimization";
@@ -2956,7 +2955,7 @@
       break;
     }
     case DeoptimizationRequest::kFullUndeoptimization: {
-      DCHECK(req.method == nullptr);
+      DCHECK(req.Method() == nullptr);
       DCHECK_GT(full_deoptimization_event_count_, 0U);
       --full_deoptimization_event_count_;
       if (full_deoptimization_event_count_ == 0) {
@@ -2967,21 +2966,21 @@
       break;
     }
     case DeoptimizationRequest::kSelectiveDeoptimization: {
-      DCHECK(req.method != nullptr);
+      DCHECK(req.Method() != nullptr);
       VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
-                 << " for deoptimization of " << PrettyMethod(req.method);
+                 << " for deoptimization of " << PrettyMethod(req.Method());
       deoptimization_requests_.push_back(req);
       break;
     }
     case DeoptimizationRequest::kSelectiveUndeoptimization: {
-      DCHECK(req.method != nullptr);
+      DCHECK(req.Method() != nullptr);
       VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
-                 << " for undeoptimization of " << PrettyMethod(req.method);
+                 << " for undeoptimization of " << PrettyMethod(req.Method());
       deoptimization_requests_.push_back(req);
       break;
     }
     default: {
-      LOG(FATAL) << "Unknown deoptimization request kind " << req.kind;
+      LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
       break;
     }
   }
@@ -3005,7 +3004,7 @@
   {
     MutexLock mu(self, *deoptimization_lock_);
     size_t req_index = 0;
-    for (const DeoptimizationRequest& request : deoptimization_requests_) {
+    for (DeoptimizationRequest& request : deoptimization_requests_) {
       VLOG(jdwp) << "Process deoptimization request #" << req_index++;
       ProcessDeoptimizationRequest(request);
     }
@@ -3036,9 +3035,9 @@
 }
 
 static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
-    EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) {
-  for (const Breakpoint& breakpoint : gBreakpoints) {
-    if (breakpoint.method == m) {
+    EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  for (Breakpoint& breakpoint : gBreakpoints) {
+    if (breakpoint.Method() == m) {
       return &breakpoint;
     }
   }
@@ -3050,7 +3049,7 @@
     EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_)  {
   if (kIsDebugBuild) {
     for (const Breakpoint& breakpoint : gBreakpoints) {
-      CHECK_EQ(need_full_deoptimization, breakpoint.need_full_deoptimization);
+      CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
     }
     if (need_full_deoptimization) {
       // We should have deoptimized everything but not "selectively" deoptimized this method.
@@ -3080,18 +3079,18 @@
     // inlined, we deoptimize everything; otherwise we deoptimize only this method.
     need_full_deoptimization = IsMethodPossiblyInlined(self, m);
     if (need_full_deoptimization) {
-      req->kind = DeoptimizationRequest::kFullDeoptimization;
-      req->method = nullptr;
+      req->SetKind(DeoptimizationRequest::kFullDeoptimization);
+      req->SetMethod(nullptr);
     } else {
-      req->kind = DeoptimizationRequest::kSelectiveDeoptimization;
-      req->method = m;
+      req->SetKind(DeoptimizationRequest::kSelectiveDeoptimization);
+      req->SetMethod(m);
     }
   } else {
     // There is at least one breakpoint for this method: we don't need to deoptimize.
-    req->kind = DeoptimizationRequest::kNothing;
-    req->method = nullptr;
+    req->SetKind(DeoptimizationRequest::kNothing);
+    req->SetMethod(nullptr);
 
-    need_full_deoptimization = existing_breakpoint->need_full_deoptimization;
+    need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization();
     SanityCheckExistingBreakpoints(m, need_full_deoptimization);
   }
 
@@ -3103,15 +3102,14 @@
 // Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
 // request if we need to undeoptimize.
 void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
+  MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
   mirror::ArtMethod* m = FromMethodId(location->method_id);
   DCHECK(m != nullptr) << "No method for method id " << location->method_id;
-
-  MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
   bool need_full_deoptimization = false;
   for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
-    if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == location->dex_pc) {
+    if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
       VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
-      need_full_deoptimization = gBreakpoints[i].need_full_deoptimization;
+      need_full_deoptimization = gBreakpoints[i].NeedFullDeoptimization();
       DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
       gBreakpoints.erase(gBreakpoints.begin() + i);
       break;
@@ -3122,17 +3120,17 @@
     // There is no more breakpoint on this method: we need to undeoptimize.
     if (need_full_deoptimization) {
       // This method required full deoptimization: we need to undeoptimize everything.
-      req->kind = DeoptimizationRequest::kFullUndeoptimization;
-      req->method = nullptr;
+      req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
+      req->SetMethod(nullptr);
     } else {
       // This method required selective deoptimization: we need to undeoptimize only that method.
-      req->kind = DeoptimizationRequest::kSelectiveUndeoptimization;
-      req->method = m;
+      req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
+      req->SetMethod(m);
     }
   } else {
     // There is at least one breakpoint for this method: we don't need to undeoptimize.
-    req->kind = DeoptimizationRequest::kNothing;
-    req->method = nullptr;
+    req->SetKind(DeoptimizationRequest::kNothing);
+    req->SetMethod(nullptr);
     SanityCheckExistingBreakpoints(m, need_full_deoptimization);
   }
 }
@@ -4590,4 +4588,14 @@
   return result;
 }
 
+mirror::ArtMethod* DeoptimizationRequest::Method() const {
+  ScopedObjectAccessUnchecked soa(Thread::Current());
+  return soa.DecodeMethod(method_);
+}
+
+void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) {
+  ScopedObjectAccessUnchecked soa(Thread::Current());
+  method_ = soa.EncodeMethod(m);
+}
+
 }  // namespace art