Merge "Fix signal chain to allow for signal() call to be used"
diff --git a/compiler/dex/quick/ralloc_util.cc b/compiler/dex/quick/ralloc_util.cc
index 0a737a9..195da0d 100644
--- a/compiler/dex/quick/ralloc_util.cc
+++ b/compiler/dex/quick/ralloc_util.cc
@@ -934,9 +934,9 @@
 bool Mir2Lir::CheckCorePoolSanity() {
   GrowableArray<RegisterInfo*>::Iterator it(&tempreg_info_);
   for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
-    if (info->IsTemp() && info->IsLive() && info->IsWide()) {
+    int my_sreg = info->SReg();
+    if (info->IsTemp() && info->IsLive() && info->IsWide() && my_sreg != INVALID_SREG) {
       RegStorage my_reg = info->GetReg();
-      int my_sreg = info->SReg();
       RegStorage partner_reg = info->Partner();
       RegisterInfo* partner = GetRegInfo(partner_reg);
       DCHECK(partner != NULL);
@@ -944,12 +944,8 @@
       DCHECK_EQ(my_reg.GetReg(), partner->Partner().GetReg());
       DCHECK(partner->IsLive());
       int partner_sreg = partner->SReg();
-      if (my_sreg == INVALID_SREG) {
-        DCHECK_EQ(partner_sreg, INVALID_SREG);
-      } else {
-        int diff = my_sreg - partner_sreg;
-        DCHECK((diff == 0) || (diff == -1) || (diff == 1));
-      }
+      int diff = my_sreg - partner_sreg;
+      DCHECK((diff == 0) || (diff == -1) || (diff == 1));
     }
     if (info->Master() != info) {
       // Aliased.
diff --git a/runtime/debugger.cc b/runtime/debugger.cc
index ffd15f6..9136f9f 100644
--- a/runtime/debugger.cc
+++ b/runtime/debugger.cc
@@ -2281,8 +2281,9 @@
   // trying to suspend this one.
   MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
   bool timed_out;
-  Thread* thread = ThreadList::SuspendThreadByPeer(peer.get(), request_suspension, true,
-                                                   &timed_out);
+  ThreadList* thread_list = Runtime::Current()->GetThreadList();
+  Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
+                                                    &timed_out);
   if (thread != NULL) {
     return JDWP::ERR_NONE;
   } else if (timed_out) {
@@ -3171,8 +3172,8 @@
         {
           // Take suspend thread lock to avoid races with threads trying to suspend this one.
           MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
-          suspended_thread = ThreadList::SuspendThreadByPeer(thread_peer, true, true,
-                                                             &timed_out);
+          ThreadList* thread_list = Runtime::Current()->GetThreadList();
+          suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
         }
         CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
         if (suspended_thread == nullptr) {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index f99f361..6a91501 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -407,9 +407,11 @@
     mark_compact_collector_ = new collector::MarkCompact(this);
     garbage_collectors_.push_back(mark_compact_collector_);
   }
-  if (GetImageSpace() != nullptr && non_moving_space_ != nullptr) {
+  if (GetImageSpace() != nullptr && non_moving_space_ != nullptr &&
+      (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
     // Check that there's no gap between the image space and the non moving space so that the
-    // immune region won't break (eg. due to a large object allocated in the gap).
+    // immune region won't break (e.g. due to a large object allocated in the gap). This is only
+    // required when we're the zygote or using GSS.
     bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(),
                                       non_moving_space_->GetMemMap());
     if (!no_gap) {
diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc
index 416c267..41c34c9 100644
--- a/runtime/gc/space/image_space.cc
+++ b/runtime/gc/space/image_space.cc
@@ -16,6 +16,9 @@
 
 #include "image_space.h"
 
+#include <dirent.h>
+#include <sys/types.h>
+
 #include <random>
 
 #include "base/stl_util.h"
@@ -65,7 +68,54 @@
   return r;
 }
 
-static bool GenerateImage(const std::string& image_filename, std::string* error_msg) {
+// We are relocating or generating the core image. We should get rid of everything. It is all
+// out-of-date. We also don't really care if this fails, since it is just a convenience.
+// Adapted from prune_dex_cache(const char* subdir) in frameworks/native/cmds/installd/commands.c
+// Note this should only be used during first boot.
+static void RealPruneDexCache(const std::string& cache_dir_path);
+static void PruneDexCache(InstructionSet isa) {
+  CHECK_NE(isa, kNone);
+  // Prune the base /data/dalvik-cache
+  RealPruneDexCache(GetDalvikCacheOrDie(".", false));
+  // Prune /data/dalvik-cache/<isa>
+  RealPruneDexCache(GetDalvikCacheOrDie(GetInstructionSetString(isa), false));
+}
+
+static void RealPruneDexCache(const std::string& cache_dir_path) {
+  if (!OS::DirectoryExists(cache_dir_path.c_str())) {
+    return;
+  }
+  DIR* cache_dir = opendir(cache_dir_path.c_str());
+  if (cache_dir == nullptr) {
+    PLOG(WARNING) << "Unable to open " << cache_dir_path << " to delete its contents";
+    return;
+  }
+
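+  // Walk the cache directory, unlinking every regular file. Subdirectories (e.g. the per-<isa>
+  // dirs under the base dalvik-cache) are left in place.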
+  for (struct dirent* de = readdir(cache_dir); de != nullptr; de = readdir(cache_dir)) {
+    const char* name = de->d_name;
+    if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0) {
+      continue;
+    }
+    // We only want to delete regular files.
+    if (de->d_type != DT_REG) {
+      if (de->d_type != DT_DIR) {
+        // We do expect some directories (namely the per-<isa> dirs in the base dalvik-cache).
+        LOG(WARNING) << "Unexpected file type of " << std::hex << de->d_type << " encountered.";
+      }
+      continue;
+    }
+    std::string cache_file(cache_dir_path);
+    cache_file += '/';
+    cache_file += name;
+    if (TEMP_FAILURE_RETRY(unlink(cache_file.c_str())) != 0) {
+      PLOG(ERROR) << "Unable to unlink " << cache_file;
+      continue;
+    }
+  }
+  CHECK_EQ(0, TEMP_FAILURE_RETRY(closedir(cache_dir))) << "Unable to close directory.";
+}
+
+static bool GenerateImage(const std::string& image_filename, InstructionSet image_isa,
+                          std::string* error_msg) {
   const std::string boot_class_path_string(Runtime::Current()->GetBootClassPathString());
   std::vector<std::string> boot_class_path;
   Split(boot_class_path_string, ':', boot_class_path);
@@ -73,6 +123,11 @@
     *error_msg = "Failed to generate image because no boot class path specified";
     return false;
   }
+  // We should clean up so we are more likely to have room for the image.
+  if (Runtime::Current()->IsZygote()) {
+    LOG(INFO) << "Pruning dalvik-cache since we are generating an image and will need to recompile";
+    PruneDexCache(image_isa);
+  }
 
   std::vector<std::string> arg_vector;
 
@@ -94,6 +149,7 @@
   arg_vector.push_back(oat_file_option_string);
 
   Runtime::Current()->AddCurrentRuntimeFeaturesAsDex2OatArguments(&arg_vector);
+  CHECK_EQ(image_isa, kRuntimeISA) << "We should always be generating an image for the current isa.";
 
   int32_t base_offset = ChooseRelocationOffsetDelta(ART_BASE_ADDRESS_MIN_DELTA,
                                                     ART_BASE_ADDRESS_MAX_DELTA);
@@ -169,6 +225,12 @@
 // Relocate the image at image_location to dest_filename and relocate it by a random amount.
 static bool RelocateImage(const char* image_location, const char* dest_filename,
                                InstructionSet isa, std::string* error_msg) {
+  // We should clean up so we are more likely to have room for the image.
+  if (Runtime::Current()->IsZygote()) {
+    LOG(INFO) << "Pruning dalvik-cache since we are relocating an image and will need to recompile";
+    PruneDexCache(isa);
+  }
+
   std::string patchoat(Runtime::Current()->GetPatchoatExecutable());
 
   std::string input_image_location_arg("--input-image-location=");
@@ -398,7 +460,7 @@
   } else if (!dalvik_cache_exists) {
     *error_msg = StringPrintf("No place to put generated image.");
     return nullptr;
-  } else if (!GenerateImage(cache_filename, error_msg)) {
+  } else if (!GenerateImage(cache_filename, image_isa, error_msg)) {
     *error_msg = StringPrintf("Failed to generate image '%s': %s",
                               cache_filename.c_str(), error_msg->c_str());
     return nullptr;
diff --git a/runtime/indirect_reference_table-inl.h b/runtime/indirect_reference_table-inl.h
index c826716..00f7b06 100644
--- a/runtime/indirect_reference_table-inl.h
+++ b/runtime/indirect_reference_table-inl.h
@@ -79,7 +79,7 @@
   mirror::Object* obj = table_[idx].Read<kWithoutReadBarrier>();
   if (LIKELY(obj != kClearedJniWeakGlobal)) {
     // The read barrier or VerifyObject won't handle kClearedJniWeakGlobal.
-    obj = table_[idx].Read();
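+    // Re-read with the caller's kReadBarrierOption rather than the default behavior.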
+    obj = table_[idx].Read<kReadBarrierOption>();
     VerifyObject(obj);
   }
   return obj;
diff --git a/runtime/native/dalvik_system_VMStack.cc b/runtime/native/dalvik_system_VMStack.cc
index b079229..eef1c46 100644
--- a/runtime/native/dalvik_system_VMStack.cc
+++ b/runtime/native/dalvik_system_VMStack.cc
@@ -34,12 +34,13 @@
   } else {
     // Suspend thread to build stack trace.
     soa.Self()->TransitionFromRunnableToSuspended(kNative);
+    ThreadList* thread_list = Runtime::Current()->GetThreadList();
     bool timed_out;
     Thread* thread;
     {
       // Take suspend thread lock to avoid races with threads trying to suspend this one.
       MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
-      thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
+      thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
     }
     if (thread != nullptr) {
       // Must be runnable to create returned array.
@@ -47,7 +48,7 @@
       trace = thread->CreateInternalStackTrace<false>(soa);
       soa.Self()->TransitionFromRunnableToSuspended(kNative);
       // Restart suspended thread.
-      Runtime::Current()->GetThreadList()->Resume(thread, false);
+      thread_list->Resume(thread, false);
     } else {
       if (timed_out) {
         LOG(ERROR) << "Trying to get thread's stack failed as the thread failed to suspend within a "
diff --git a/runtime/native/java_lang_Thread.cc b/runtime/native/java_lang_Thread.cc
index 8f83f96..c0c7265 100644
--- a/runtime/native/java_lang_Thread.cc
+++ b/runtime/native/java_lang_Thread.cc
@@ -128,19 +128,20 @@
   // Suspend thread to avoid it from killing itself while we set its name. We don't just hold the
   // thread list lock to avoid this, as setting the thread name causes mutator to lock/unlock
   // in the DDMS send code.
+  ThreadList* thread_list = Runtime::Current()->GetThreadList();
   bool timed_out;
   // Take suspend thread lock to avoid races with threads trying to suspend this one.
   Thread* thread;
   {
     MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
-    thread = ThreadList::SuspendThreadByPeer(peer, true, false, &timed_out);
+    thread = thread_list->SuspendThreadByPeer(peer, true, false, &timed_out);
   }
   if (thread != NULL) {
     {
       ScopedObjectAccess soa(env);
       thread->SetThreadName(name.c_str());
     }
-    Runtime::Current()->GetThreadList()->Resume(thread, false);
+    thread_list->Resume(thread, false);
   } else if (timed_out) {
     LOG(ERROR) << "Trying to set thread name to '" << name.c_str() << "' failed as the thread "
         "failed to suspend within a generous timeout.";
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 7d69828..e0d67d6 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -507,40 +507,9 @@
     + 4 * KB;
   if (read_stack_size <= min_stack) {
     LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << read_stack_size
-        << " bytes)";
+               << " bytes)";
   }
 
-  // TODO: move this into the Linux GetThreadStack implementation.
-#if !defined(__APPLE__)
-  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
-  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
-  // will be broken because we'll die long before we get close to 2GB.
-  bool is_main_thread = (::art::GetTid() == getpid());
-  if (is_main_thread) {
-    rlimit stack_limit;
-    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
-      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
-    }
-    if (stack_limit.rlim_cur == RLIM_INFINITY) {
-      // Find the default stack size for new threads...
-      pthread_attr_t default_attributes;
-      size_t default_stack_size;
-      CHECK_PTHREAD_CALL(pthread_attr_init, (&default_attributes), "default stack size query");
-      CHECK_PTHREAD_CALL(pthread_attr_getstacksize, (&default_attributes, &default_stack_size),
-                         "default stack size query");
-      CHECK_PTHREAD_CALL(pthread_attr_destroy, (&default_attributes), "default stack size query");
-
-      // ...and use that as our limit.
-      size_t old_stack_size = read_stack_size;
-      tlsPtr_.stack_size = default_stack_size;
-      tlsPtr_.stack_begin += (old_stack_size - default_stack_size);
-      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
-                    << " to " << PrettySize(default_stack_size)
-                    << " with base " << reinterpret_cast<void*>(tlsPtr_.stack_begin);
-    }
-  }
-#endif
-
   // Set stack_end_ to the bottom of the stack saving space of stack overflows
 
   Runtime* runtime = Runtime::Current();
diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc
index 5077a89..740e3b0 100644
--- a/runtime/thread_list.cc
+++ b/runtime/thread_list.cc
@@ -406,7 +406,8 @@
 void ThreadList::Resume(Thread* thread, bool for_debugger) {
   Thread* self = Thread::Current();
   DCHECK_NE(thread, self);
-  VLOG(threads) << "Resume(" << *thread << ") starting..." << (for_debugger ? " (debugger)" : "");
+  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") starting..."
+      << (for_debugger ? " (debugger)" : "");
 
   {
     // To check Contains.
@@ -415,18 +416,22 @@
     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
     DCHECK(thread->IsSuspended());
     if (!Contains(thread)) {
+      // We only expect threads in the thread list to have been suspended; otherwise we can't
+      // stop such threads from deleting themselves.
+      LOG(ERROR) << "Resume(" << reinterpret_cast<void*>(thread)
+          << ") thread not within thread list";
       return;
     }
     thread->ModifySuspendCount(self, -1, for_debugger);
   }
 
   {
-    VLOG(threads) << "Resume(" << *thread << ") waking others";
+    VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") waking others";
     MutexLock mu(self, *Locks::thread_suspend_count_lock_);
     Thread::resume_cond_->Broadcast(self);
   }
 
-  VLOG(threads) << "Resume(" << *thread << ") complete";
+  VLOG(threads) << "Resume(" << reinterpret_cast<void*>(thread) << ") complete";
 }
 
 static void ThreadSuspendByPeerWarning(Thread* self, int level, const char* message, jobject peer) {
@@ -451,6 +456,7 @@
   bool did_suspend_request = false;
   *timed_out = false;
   Thread* self = Thread::Current();
+  VLOG(threads) << "SuspendThreadByPeer starting";
   while (true) {
     Thread* thread;
     {
@@ -462,10 +468,16 @@
       ScopedObjectAccess soa(self);
       MutexLock mu(self, *Locks::thread_list_lock_);
       thread = Thread::FromManagedThread(soa, peer);
-      if (thread == NULL) {
+      if (thread == nullptr) {
         ThreadSuspendByPeerWarning(self, WARNING, "No such thread for suspend", peer);
-        return NULL;
+        return nullptr;
       }
+      if (!Contains(thread)) {
+        VLOG(threads) << "SuspendThreadByPeer failed for unattached thread: "
+            << reinterpret_cast<void*>(thread);
+        return nullptr;
+      }
+      VLOG(threads) << "SuspendThreadByPeer found thread: " << *thread;
       {
         MutexLock mu(self, *Locks::thread_suspend_count_lock_);
         if (request_suspension) {
@@ -485,6 +497,7 @@
         // count, or else we've waited and it has self suspended) or is the current thread, we're
         // done.
         if (thread->IsSuspended()) {
+          VLOG(threads) << "SuspendThreadByPeer thread suspended: " << *thread;
           return thread;
         }
         if (total_delay_us >= kTimeoutUs) {
@@ -493,11 +506,12 @@
             thread->ModifySuspendCount(soa.Self(), -1, debug_suspension);
           }
           *timed_out = true;
-          return NULL;
+          return nullptr;
         }
       }
       // Release locks and come out of runnable state.
     }
+    VLOG(threads) << "SuspendThreadByPeer sleeping to give the thread a chance to suspend";
     ThreadSuspendSleep(self, &delay_us, &total_delay_us);
   }
 }
@@ -515,6 +529,7 @@
   Thread* suspended_thread = nullptr;
   Thread* self = Thread::Current();
   CHECK_NE(thread_id, kInvalidThreadId);
+  VLOG(threads) << "SuspendThreadByThreadId starting";
   while (true) {
     {
       // Note: this will transition to runnable and potentially suspend. We ensure only one thread
@@ -536,8 +551,10 @@
             << " no longer in thread list";
         // There's a race in inflating a lock and the owner giving up ownership and then dying.
         ThreadSuspendByThreadIdWarning(WARNING, "No such thread id for suspend", thread_id);
-        return NULL;
+        return nullptr;
       }
+      VLOG(threads) << "SuspendThreadByThreadId found thread: " << *thread;
+      DCHECK(Contains(thread));
       {
         MutexLock mu(self, *Locks::thread_suspend_count_lock_);
         if (suspended_thread == nullptr) {
@@ -557,6 +574,7 @@
         // count, or else we've waited and it has self suspended) or is the current thread, we're
         // done.
         if (thread->IsSuspended()) {
+          VLOG(threads) << "SuspendThreadByThreadId thread suspended: " << *thread;
           return thread;
         }
         if (total_delay_us >= kTimeoutUs) {
@@ -565,11 +583,12 @@
             thread->ModifySuspendCount(soa.Self(), -1, debug_suspension);
           }
           *timed_out = true;
-          return NULL;
+          return nullptr;
         }
       }
       // Release locks and come out of runnable state.
     }
+    VLOG(threads) << "SuspendThreadByThreadId sleeping to give the thread a chance to suspend";
     ThreadSuspendSleep(self, &delay_us, &total_delay_us);
   }
 }
diff --git a/runtime/thread_list.h b/runtime/thread_list.h
index 1b67ac0..bb4f775 100644
--- a/runtime/thread_list.h
+++ b/runtime/thread_list.h
@@ -66,8 +66,8 @@
   // If the thread should be suspended then value of request_suspension should be true otherwise
   // the routine will wait for a previous suspend request. If the suspension times out then *timeout
   // is set to true.
-  static Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension,
-                                     bool* timed_out)
+  Thread* SuspendThreadByPeer(jobject peer, bool request_suspension, bool debug_suspension,
+                              bool* timed_out)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_suspend_thread_lock_)
       LOCKS_EXCLUDED(Locks::mutator_lock_,
                      Locks::thread_list_lock_,
diff --git a/runtime/utils.cc b/runtime/utils.cc
index 55ecc1e..cb281f2 100644
--- a/runtime/utils.cc
+++ b/runtime/utils.cc
@@ -108,6 +108,31 @@
   CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, stack_base, stack_size), __FUNCTION__);
   CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
   CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
+
+#if defined(__GLIBC__)
+  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
+  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
+  // will be broken because we'll die long before we get close to 2GB.
+  bool is_main_thread = (::art::GetTid() == getpid());
+  if (is_main_thread) {
+    rlimit stack_limit;
+    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
+      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
+    }
+    if (stack_limit.rlim_cur == RLIM_INFINITY) {
+      size_t old_stack_size = *stack_size;
+
+      // Use the kernel default limit as our size, and adjust the base to match.
+      *stack_size = 8 * MB;
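+      // The stack grows down, so keep the top of the old range by moving the base up.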
+      *stack_base = reinterpret_cast<uint8_t*>(*stack_base) + (old_stack_size - *stack_size);
+
+      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
+                    << " to " << PrettySize(*stack_size)
+                    << " with base " << *stack_base;
+    }
+  }
+#endif
+
 #endif
 }
 
@@ -1069,10 +1094,6 @@
 
 void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
     mirror::ArtMethod* current_method) {
-  // We may be called from contexts where current_method is not null, so we must assert this.
-  if (current_method != nullptr) {
-    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
-  }
 #ifdef __linux__
   std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid));
   if (!backtrace->Unwind(0)) {
@@ -1104,7 +1125,9 @@
         if (it->func_offset != 0) {
           os << "+" << it->func_offset;
         }
-      } else if (current_method != nullptr && current_method->IsWithinQuickCode(it->pc)) {
+      } else if (current_method != nullptr &&
+                 Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
+                 current_method->IsWithinQuickCode(it->pc)) {
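+        // current_method may only be inspected while the mutator lock is shared-held.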
         const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
         os << JniLongName(current_method) << "+"
            << (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
diff --git a/test/051-thread/expected.txt b/test/051-thread/expected.txt
index 7139b7f..943d1df 100644
--- a/test/051-thread/expected.txt
+++ b/test/051-thread/expected.txt
@@ -6,4 +6,7 @@
 testThreadDaemons @ Thread bailing
 testThreadDaemons finished
 testSleepZero finished
+testSetName starting
+testSetName running
+testSetName finished
 thread test done
diff --git a/test/051-thread/src/Main.java b/test/051-thread/src/Main.java
index 608b7e0..390685d 100644
--- a/test/051-thread/src/Main.java
+++ b/test/051-thread/src/Main.java
@@ -25,6 +25,7 @@
         testThreadCapacity();
         testThreadDaemons();
         testSleepZero();
+        testSetName();
         System.out.println("thread test done");
     }
 
@@ -112,4 +113,24 @@
         }
         System.out.print("testSleepZero finished\n");
     }
+
+    private static void testSetName() throws Exception {
+        System.out.print("testSetName starting\n");
+        Thread thread = new Thread() {
+            @Override
+            public void run() {
+                System.out.print("testSetName running\n");
+            }
+        };
+        thread.start();
+        thread.setName("HelloWorld");  // b/17302037: hang if setName is called after start
+        if (!thread.getName().equals("HelloWorld")) {
+            throw new AssertionError("Unexpected thread name: " + thread.getName());
+        }
+        thread.join();
+        if (!thread.getName().equals("HelloWorld")) {
+            throw new AssertionError("Unexpected thread name after join: " + thread.getName());
+        }
+        System.out.print("testSetName finished\n");
+    }
 }