Move IsDaemon to native code.

Having the daemon status in the managed heap means it is only readable
by an attached thread in the runnable state. This is problematic when
terminating the VM.
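
A minimal standalone sketch (not ART code, simplified names) of what the
change buys: the daemon flag becomes a plain native field that any thread
can read in any state, instead of a managed java.lang.Thread field that
needs an attached, runnable thread to decode.

    #include <iostream>

    class NativeThread {
     public:
      explicit NativeThread(bool daemon) : daemon_(daemon) {}
      // After this change: a plain field read, safe from any thread in any state.
      bool IsDaemon() const { return daemon_; }
     private:
      const bool daemon_;  // Set once at creation; Thread.start passes it to nativeCreate.
    };

    int main() {
      NativeThread t(true);
      std::cout << std::boolalpha << t.IsDaemon() << std::endl;  // prints "true"
      return 0;
    }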

Add extra asserts that ScopedJniThreadState and Thread::SetState are
only used on the expected threads.
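
The thread-affinity asserts boil down to "check that we are running on the
thread we think we are". An illustrative standalone sketch (not ART code)
of the same pattern, using the standard library in place of
Thread::Current():

    #include <cassert>
    #include <thread>

    class Worker {
     public:
      Worker() : owner_(std::this_thread::get_id()) {}
      void SetState(int new_state) {
        // Mirrors the new DCHECK_EQ(this, Thread::Current()) in Thread::SetState:
        // only the thread itself is expected to change its own state.
        assert(std::this_thread::get_id() == owner_);
        state_ = new_state;
      }
     private:
      const std::thread::id owner_;
      int state_ = 0;
    };

    int main() {
      Worker w;
      w.SetState(1);  // OK: called on the owning thread.
      // std::thread([&w] { w.SetState(2); }).join();  // Would trip the assert.
      return 0;
    }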

Remember the cause of the current no-thread-suspension region so that
assertion failures can report why suspension was disallowed.
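
The cause bookkeeping is a small save/restore protocol: Start returns the
previous cause so regions can nest, End restores it, and the allowability
assert can say why suspension was forbidden. A standalone sketch (not ART
code) of that protocol:

    #include <cassert>
    #include <cstdio>

    struct SuspensionGuardState {
      unsigned no_thread_suspension = 0;  // Depth of "no suspension" regions.
      const char* last_cause = nullptr;   // Why the innermost region exists.

      // Returns the previous cause so a nested region can restore it on exit.
      const char* StartAssertNoThreadSuspension(const char* cause) {
        const char* previous = last_cause;
        no_thread_suspension++;
        last_cause = cause;
        return previous;
      }

      void EndAssertNoThreadSuspension(const char* old_cause) {
        assert(no_thread_suspension > 0);
        no_thread_suspension--;
        last_cause = old_cause;
      }

      void AssertThreadSuspensionIsAllowable() const {
        if (no_thread_suspension != 0) {
          std::printf("suspension not allowed: %s\n", last_cause);
          assert(no_thread_suspension == 0);
        }
      }
    };

    int main() {
      SuspensionGuardState self;
      const char* old_cause =
          self.StartAssertNoThreadSuspension("Building internal stack trace");
      // self.AssertThreadSuspensionIsAllowable();  // Would report the cause and abort.
      self.EndAssertNoThreadSuspension(old_cause);
      self.AssertThreadSuspensionIsAllowable();     // Fine again.
      return 0;
    }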

Change-Id: I8cf35c4c77751f8b1a35a5fb9a7f67682bf153bc
diff --git a/src/native/java_lang_Thread.cc b/src/native/java_lang_Thread.cc
index 86b3a20..626255e 100644
--- a/src/native/java_lang_Thread.cc
+++ b/src/native/java_lang_Thread.cc
@@ -42,8 +42,9 @@
   return (thread != NULL) ? thread->IsInterrupted() : JNI_FALSE;
 }
 
-static void Thread_nativeCreate(JNIEnv* env, jclass, jobject java_thread, jlong stack_size) {
-  Thread::CreateNativeThread(env, java_thread, stack_size);
+static void Thread_nativeCreate(JNIEnv* env, jclass, jobject java_thread, jlong stack_size,
+                                jboolean daemon) {
+  Thread::CreateNativeThread(env, java_thread, stack_size, daemon == JNI_TRUE);
 }
 
 static jint Thread_nativeGetStatus(JNIEnv* env, jobject java_thread, jboolean has_been_started) {
@@ -140,7 +141,7 @@
   NATIVE_METHOD(Thread, currentThread, "()Ljava/lang/Thread;"),
   NATIVE_METHOD(Thread, interrupted, "()Z"),
   NATIVE_METHOD(Thread, isInterrupted, "()Z"),
-  NATIVE_METHOD(Thread, nativeCreate, "(Ljava/lang/Thread;J)V"),
+  NATIVE_METHOD(Thread, nativeCreate, "(Ljava/lang/Thread;JZ)V"),
   NATIVE_METHOD(Thread, nativeGetStatus, "(Z)I"),
   NATIVE_METHOD(Thread, nativeHoldsLock, "(Ljava/lang/Object;)Z"),
   NATIVE_METHOD(Thread, nativeInterrupt, "()V"),
diff --git a/src/scoped_jni_thread_state.h b/src/scoped_jni_thread_state.h
index 42ed19c..1c9ab2c 100644
--- a/src/scoped_jni_thread_state.h
+++ b/src/scoped_jni_thread_state.h
@@ -34,22 +34,24 @@
 class ScopedJniThreadState {
  public:
   explicit ScopedJniThreadState(JNIEnv* env, ThreadState new_state = kRunnable)
-    : env_(reinterpret_cast<JNIEnvExt*>(env)), vm_(env_->vm), self_(ThreadForEnv(env)),
-      old_thread_state_(self_->SetState(new_state)), thread_state_(new_state) {
+      : env_(reinterpret_cast<JNIEnvExt*>(env)), vm_(env_->vm), self_(ThreadForEnv(env)),
+        old_thread_state_(self_->SetState(new_state)), thread_state_(new_state) {
     self_->VerifyStack();
   }
 
   explicit ScopedJniThreadState(Thread* self, ThreadState new_state = kRunnable)
-    : env_(reinterpret_cast<JNIEnvExt*>(self->GetJniEnv())), vm_(env_->vm), self_(self),
-      old_thread_state_(self_->SetState(new_state)), thread_state_(new_state) {
+      : env_(reinterpret_cast<JNIEnvExt*>(self->GetJniEnv())), vm_(env_->vm), self_(self),
+        old_thread_state_(self_->SetState(new_state)), thread_state_(new_state) {
+    if (!Vm()->work_around_app_jni_bugs && self != Thread::Current()) {
+      UnexpectedThreads(self, Thread::Current());
+    }
     self_->VerifyStack();
   }
 
-  // Used when we want a scoped jni thread state but have no thread/JNIEnv.
+  // Used when we want a scoped JNI thread state but have no thread/JNIEnv.
   explicit ScopedJniThreadState(JavaVM* vm)
-    : env_(NULL), vm_(reinterpret_cast<JavaVMExt*>(vm)), self_(NULL),
-      old_thread_state_(kTerminated), thread_state_(kTerminated) {
-  }
+      : env_(NULL), vm_(reinterpret_cast<JavaVMExt*>(vm)), self_(NULL),
+        old_thread_state_(kTerminated), thread_state_(kTerminated) {}
 
   ~ScopedJniThreadState() {
     if (self_ != NULL) {
@@ -164,13 +166,18 @@
     Thread* env_self = full_env->self;
     Thread* self = work_around_app_jni_bugs ? Thread::Current() : env_self;
     if (!work_around_app_jni_bugs && self != env_self) {
-      // TODO: pass through function name so we can use it here instead of NULL...
-      JniAbortF(NULL, "JNIEnv for %s used on %s",
-                ToStr<Thread>(*env_self).c_str(), ToStr<Thread>(*self).c_str());
+      UnexpectedThreads(env_self, self);
     }
     return self;
   }
 
+  static void UnexpectedThreads(Thread* found_self, Thread* expected_self) {
+    // TODO: pass through function name so we can use it here instead of NULL...
+    JniAbortF(NULL, "JNIEnv for %s used on %s",
+              found_self != NULL ? ToStr<Thread>(*found_self).c_str() : "NULL",
+              expected_self != NULL ? ToStr<Thread>(*expected_self).c_str() : "NULL");
+  }
+
   // The full JNIEnv.
   JNIEnvExt* const env_;
   // The full JavaVM.
diff --git a/src/thread.cc b/src/thread.cc
index 8fe0160..104b9b9 100644
--- a/src/thread.cc
+++ b/src/thread.cc
@@ -207,8 +207,8 @@
   delete[] allocated_signal_stack;
 }
 
-void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size) {
-  Thread* native_thread = new Thread;
+void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool daemon) {
+  Thread* native_thread = new Thread(daemon);
   {
     ScopedJniThreadState ts(env);
     Object* peer = ts.Decode<Object*>(java_peer);
@@ -277,7 +277,7 @@
 }
 
 Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group) {
-  Thread* self = new Thread;
+  Thread* self = new Thread(as_daemon);
   self->Init();
 
   self->SetState(kNative);
@@ -599,12 +599,20 @@
 }
 
 void Thread::SetStateWithoutSuspendCheck(ThreadState new_state) {
+  DCHECK_EQ(this, Thread::Current());
   volatile void* raw = reinterpret_cast<volatile void*>(&state_);
   volatile int32_t* addr = reinterpret_cast<volatile int32_t*>(raw);
   android_atomic_release_store(new_state, addr);
 }
 
 ThreadState Thread::SetState(ThreadState new_state) {
+  if (new_state != kVmWait && new_state != kTerminated) {
+    // TODO: kVmWait is set by the parent thread to a child thread to indicate it can go. Similarly
+    // kTerminated may be set by a parent thread to its child if pthread creation fails.  This
+    // overloaded use of the state variable means we cannot fully assert that only threads
+    // themselves modify their state.
+    DCHECK_EQ(this, Thread::Current());
+  }
   ThreadState old_state = state_;
   if (old_state == kRunnable) {
     // Non-runnable states are points where we expect thread suspension can occur.
@@ -764,7 +772,7 @@
   CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
 }
 
-Thread::Thread()
+Thread::Thread(bool daemon)
     : suspend_count_(0),
       card_table_(NULL),
       exception_(NULL),
@@ -793,7 +801,9 @@
       debug_invoke_req_(new DebugInvokeReq),
       trace_stack_(new std::vector<TraceStackFrame>),
       name_(new std::string(kThreadNameDuringStartup)),
-      no_thread_suspension_(0) {
+      daemon_(daemon),
+      no_thread_suspension_(0),
+      last_no_thread_suspension_cause_(NULL) {
   CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread);
   memset(&held_mutexes_[0], 0, sizeof(held_mutexes_));
 }
@@ -1065,14 +1075,18 @@
     // object graph.
     method_trace->Set(depth, dex_pc_trace);
     // Set the Object*s and assert that no thread suspension is now possible.
-    ts.Self()->StartAssertNoThreadSuspension();
+    const char* last_no_suspend_cause =
+        ts.Self()->StartAssertNoThreadSuspension("Building internal stack trace");
+    CHECK(last_no_suspend_cause == NULL) << last_no_suspend_cause;
     method_trace_ = method_trace.get();
     dex_pc_trace_ = dex_pc_trace;
     return true;
   }
 
   virtual ~BuildInternalStackTraceVisitor() {
-    Thread::Current()->EndAssertNoThreadSuspension();
+    if (method_trace_ != NULL) {
+      Thread::Current()->EndAssertNoThreadSuspension(NULL);
+    }
   }
 
   bool VisitFrame() {
@@ -1436,7 +1450,12 @@
         throw_frame_id_(0), throw_dex_pc_(0), handler_quick_frame_(NULL),
         handler_quick_frame_pc_(0), handler_dex_pc_(0), native_method_count_(0),
         method_tracing_active_(Runtime::Current()->IsMethodTracingActive()) {
-    self->StartAssertNoThreadSuspension();  // Exception not in root sets, can't allow GC.
+    // Exception not in root sets, can't allow GC.
+    last_no_assert_suspension_cause_ = self->StartAssertNoThreadSuspension("Finding catch block");
+  }
+
+  ~CatchBlockStackVisitor() {
+    LOG(FATAL) << "UNREACHABLE";  // Expected to take long jump.
   }
 
   bool VisitFrame() {
@@ -1496,8 +1515,8 @@
         LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")";
       }
     }
-    self_->SetException(exception_);
-    self_->EndAssertNoThreadSuspension();  // Exception back in root set.
+    self_->SetException(exception_);  // Exception back in root set.
+    self_->EndAssertNoThreadSuspension(last_no_assert_suspension_cause_);
     // Place context back on thread so it will be available when we continue.
     self_->ReleaseLongJumpContext(context_);
     context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
@@ -1525,6 +1544,8 @@
   uint32_t native_method_count_;
   // Is method tracing active?
   const bool method_tracing_active_;
+  // Support for nesting no thread suspension checks.
+  const char* last_no_assert_suspension_cause_;
 };
 
 void Thread::DeliverException() {
@@ -1595,11 +1616,6 @@
   return object->GetThinLockId() == thin_lock_id_;
 }
 
-bool Thread::IsDaemon() {
-  ScopedJniThreadState ts(this);
-  return ts.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(peer_);
-}
-
 class ReferenceMapVisitor : public StackVisitor {
  public:
   ReferenceMapVisitor(const ManagedStack* stack, const std::vector<TraceStackFrame>* trace_stack,
diff --git a/src/thread.h b/src/thread.h
index 7cd55a3..5e60637 100644
--- a/src/thread.h
+++ b/src/thread.h
@@ -94,7 +94,7 @@
 
   // Creates a new native thread corresponding to the given managed peer.
   // Used to implement Thread.start.
-  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size);
+  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
 
   // Attaches the calling native thread to the runtime, returning the new native peer.
   // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
@@ -131,27 +131,45 @@
   ThreadState SetState(ThreadState new_state);
   void SetStateWithoutSuspendCheck(ThreadState new_state);
 
-  bool IsDaemon();
+  bool IsDaemon() const {
+    return daemon_;
+  }
+
   bool IsSuspended();
 
   void WaitUntilSuspended();
 
   // Once called thread suspension will cause an assertion failure.
-  void StartAssertNoThreadSuspension() {
 #ifndef NDEBUG
+  const char* StartAssertNoThreadSuspension(const char* cause) {
+    CHECK(cause != NULL);
+    const char* previous_cause = last_no_thread_suspension_cause_;
     no_thread_suspension_++;
-#endif
+    last_no_thread_suspension_cause_ = cause;
+    return previous_cause;
   }
+#else
+  const char* StartAssertNoThreadSuspension(const char* cause) {
+    CHECK(cause != NULL);
+    return NULL;
+  }
+#endif
+
   // End region where no thread suspension is expected.
-  void EndAssertNoThreadSuspension() {
 #ifndef NDEBUG
-    DCHECK_GT(no_thread_suspension_, 0U);
+  void EndAssertNoThreadSuspension(const char* old_cause) {
+    CHECK(old_cause != NULL || no_thread_suspension_ == 1);
+    CHECK_GT(no_thread_suspension_, 0U);
     no_thread_suspension_--;
-#endif
+    last_no_thread_suspension_cause_ = old_cause;
   }
+#else
+  void EndAssertNoThreadSuspension(const char*) {
+  }
+#endif
 
   void AssertThreadSuspensionIsAllowable() const {
-    DCHECK_EQ(0u, no_thread_suspension_);
+    DCHECK_EQ(0u, no_thread_suspension_) << last_no_thread_suspension_cause_;
   }
 
   bool CanAccessDirectReferences() const {
@@ -494,7 +512,7 @@
   void CheckSafeToWait(MutexRank rank);
 
  private:
-  Thread();
+  explicit Thread(bool daemon);
   ~Thread();
   void Destroy();
   friend class ThreadList;  // For ~Thread and Destroy.
@@ -626,6 +644,12 @@
   // A cached copy of the java.lang.Thread's name.
   std::string* name_;
 
+  // Is the thread a daemon?
+  const bool daemon_;
+
+  // Keep data fields within Thread 4 byte aligned.
+  byte pad_[3];
+
   // A cached pthread_t for the pthread underlying this Thread*.
   pthread_t pthread_self_;
 
@@ -634,6 +658,9 @@
 
   // A positive value implies we're in a region where thread suspension isn't expected.
   uint32_t no_thread_suspension_;
+
+  // Cause of the current no-thread-suspension assertion region.
+  const char* last_no_thread_suspension_cause_;
  public:
   // Runtime support function pointers
   EntryPoints entrypoints_;