Diffstat (limited to 'runtime')
-rw-r--r--  runtime/Android.bp                     |   4
-rw-r--r--  runtime/asm_support.h                  |   2
-rw-r--r--  runtime/atomic.h                       |   9
-rw-r--r--  runtime/gc/heap.cc                     | 181
-rw-r--r--  runtime/gc/heap.h                      |  70
-rw-r--r--  runtime/monitor.cc                     |  58
-rw-r--r--  runtime/openjdkjvmti/ti_heap.cc        |   8
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.cc    | 139
-rw-r--r--  runtime/openjdkjvmti/ti_redefine.h     |  14
-rw-r--r--  runtime/openjdkjvmti/ti_stack.cc       |   5
-rw-r--r--  runtime/openjdkjvmti/ti_thread.cc      |   4
-rw-r--r--  runtime/openjdkjvmti/ti_threadgroup.cc |   2
-rw-r--r--  runtime/stack.cc                       |  21
-rw-r--r--  runtime/stack.h                        |  12
-rw-r--r--  runtime/thread.cc                      |  62
-rw-r--r--  runtime/thread.h                       |  16
16 files changed, 417 insertions(+), 186 deletions(-)
diff --git a/runtime/Android.bp b/runtime/Android.bp
index 1bae245b4e..9585ba2d8e 100644
--- a/runtime/Android.bp
+++ b/runtime/Android.bp
@@ -380,6 +380,10 @@ cc_defaults {
},
cflags: ["-DBUILDING_LIBART=1"],
generated_sources: ["art_operator_srcs"],
+ // asm_support_gen.h (used by asm_support.h) is generated with cpp-define-generator
+ generated_headers: ["cpp-define-generator-asm-support"],
+ // Export our headers so that the libart-gtest targets can use them as well.
+ export_generated_headers: ["cpp-define-generator-asm-support"],
clang: true,
include_dirs: [
"art/cmdline",
diff --git a/runtime/asm_support.h b/runtime/asm_support.h
index 46f2c08663..c7a94a90dc 100644
--- a/runtime/asm_support.h
+++ b/runtime/asm_support.h
@@ -72,7 +72,7 @@ ADD_TEST_EQ(static_cast<size_t>(1U << POINTER_SIZE_SHIFT),
// Import platform-independent constant defines from our autogenerated list.
// Export new defines (for assembly use) by editing cpp-define-generator def files.
#define DEFINE_CHECK_EQ ADD_TEST_EQ
-#include "generated/asm_support_gen.h"
+#include "asm_support_gen.h"
// Offset of field Thread::tlsPtr_.exception.
#define THREAD_EXCEPTION_OFFSET (THREAD_CARD_TABLE_OFFSET + __SIZEOF_POINTER__)
diff --git a/runtime/atomic.h b/runtime/atomic.h
index e2a7259784..45c3165b18 100644
--- a/runtime/atomic.h
+++ b/runtime/atomic.h
@@ -235,6 +235,11 @@ class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
this->store(desired, std::memory_order_seq_cst);
}
+ // Atomically replace the value with the desired value; returns the old value (relaxed ordering).
+ T ExchangeRelaxed(T desired_value) {
+ return this->exchange(desired_value, std::memory_order_relaxed);
+ }
+
// Atomically replace the value with desired value if it matches the expected value.
// Participates in total ordering of atomic operations.
bool CompareExchangeStrongSequentiallyConsistent(T expected_value, T desired_value) {
@@ -283,6 +288,10 @@ class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
return this->fetch_sub(value, std::memory_order_seq_cst); // Return old value.
}
+ T FetchAndSubRelaxed(const T value) {
+ return this->fetch_sub(value, std::memory_order_relaxed); // Return old value.
+ }
+
T FetchAndOrSequentiallyConsistent(const T value) {
return this->fetch_or(value, std::memory_order_seq_cst); // Return old_value.
}
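
The two relaxed helpers added above are thin wrappers over std::atomic; heap.cc below uses ExchangeRelaxed to drain one counter into another in a single atomic step. A minimal standalone sketch of that pattern, with illustrative counter names rather than ART's:

    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> new_bytes{0};  // Bytes registered since the last GC.
    std::atomic<size_t> old_bytes{0};  // Bytes registered before the last GC.

    // Move everything from new_bytes to old_bytes, resetting new_bytes to zero.
    // Relaxed ordering suffices: these are statistics counters, not flags that
    // publish other memory writes.
    void RollOverCounters() {
      old_bytes.fetch_add(new_bytes.exchange(0, std::memory_order_relaxed),
                          std::memory_order_relaxed);
    }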
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index fc475b5e5a..051f3f7b00 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -128,8 +128,6 @@ static constexpr uint32_t kAllocSpaceBeginForDeterministicAoT = 0x40000000;
// Dump the rosalloc stats on SIGQUIT.
static constexpr bool kDumpRosAllocStatsOnSigQuit = false;
-static constexpr size_t kNativeAllocationHistogramBuckets = 16;
-
// Extra added to the heap growth multiplier. Used to adjust the GC ergonomics for the read barrier
// config.
static constexpr double kExtraHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;
@@ -195,18 +193,12 @@ Heap::Heap(size_t initial_size,
capacity_(capacity),
growth_limit_(growth_limit),
max_allowed_footprint_(initial_size),
- native_footprint_gc_watermark_(initial_size),
- native_need_to_run_finalization_(false),
concurrent_start_bytes_(std::numeric_limits<size_t>::max()),
total_bytes_freed_ever_(0),
total_objects_freed_ever_(0),
num_bytes_allocated_(0),
- native_bytes_allocated_(0),
- native_histogram_lock_("Native allocation lock"),
- native_allocation_histogram_("Native allocation sizes",
- 1U,
- kNativeAllocationHistogramBuckets),
- native_free_histogram_("Native free sizes", 1U, kNativeAllocationHistogramBuckets),
+ new_native_bytes_allocated_(0),
+ old_native_bytes_allocated_(0),
num_bytes_freed_revoke_(0),
verify_missing_card_marks_(false),
verify_system_weaks_(false),
@@ -545,6 +537,12 @@ Heap::Heap(size_t initial_size,
gc_complete_lock_ = new Mutex("GC complete lock");
gc_complete_cond_.reset(new ConditionVariable("GC complete condition variable",
*gc_complete_lock_));
+ native_blocking_gc_lock_ = new Mutex("Native blocking GC lock");
+ native_blocking_gc_cond_.reset(new ConditionVariable("Native blocking GC condition variable",
+ *native_blocking_gc_lock_));
+ native_blocking_gc_in_progress_ = false;
+ native_blocking_gcs_finished_ = 0;
+
thread_flip_lock_ = new Mutex("GC thread flip lock");
thread_flip_cond_.reset(new ConditionVariable("GC thread flip condition variable",
*thread_flip_lock_));
@@ -1112,19 +1110,9 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
rosalloc_space_->DumpStats(os);
}
- {
- MutexLock mu(Thread::Current(), native_histogram_lock_);
- if (native_allocation_histogram_.SampleSize() > 0u) {
- os << "Histogram of native allocation ";
- native_allocation_histogram_.DumpBins(os);
- os << " bucket size " << native_allocation_histogram_.BucketWidth() << "\n";
- }
- if (native_free_histogram_.SampleSize() > 0u) {
- os << "Histogram of native free ";
- native_free_histogram_.DumpBins(os);
- os << " bucket size " << native_free_histogram_.BucketWidth() << "\n";
- }
- }
+ os << "Registered native bytes allocated: "
+ << old_native_bytes_allocated_.LoadRelaxed() + new_native_bytes_allocated_.LoadRelaxed()
+ << "\n";
BaseMutex::DumpAll(os);
}
@@ -1209,6 +1197,7 @@ Heap::~Heap() {
STLDeleteElements(&continuous_spaces_);
STLDeleteElements(&discontinuous_spaces_);
delete gc_complete_lock_;
+ delete native_blocking_gc_lock_;
delete thread_flip_lock_;
delete pending_task_lock_;
delete backtrace_lock_;
@@ -2656,6 +2645,13 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
// Approximate heap size.
ATRACE_INT("Heap size (KB)", bytes_allocated_before_gc / KB);
+ if (gc_type == NonStickyGcType()) {
+ // Move all bytes from new_native_bytes_allocated_ to
+ // old_native_bytes_allocated_ now that GC has been triggered, resetting
+ // new_native_bytes_allocated_ to zero in the process.
+ old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0));
+ }
+
DCHECK_LT(gc_type, collector::kGcTypeMax);
DCHECK_NE(gc_type, collector::kGcTypeNone);
@@ -3515,18 +3511,6 @@ bool Heap::IsMovableObject(ObjPtr<mirror::Object> obj) const {
return false;
}
-void Heap::UpdateMaxNativeFootprint() {
- size_t native_size = native_bytes_allocated_.LoadRelaxed();
- // TODO: Tune the native heap utilization to be a value other than the java heap utilization.
- size_t target_size = native_size / GetTargetHeapUtilization();
- if (target_size > native_size + max_free_) {
- target_size = native_size + max_free_;
- } else if (target_size < native_size + min_free_) {
- target_size = native_size + min_free_;
- }
- native_footprint_gc_watermark_ = std::min(growth_limit_, target_size);
-}
-
collector::GarbageCollector* Heap::FindCollectorByGcType(collector::GcType gc_type) {
for (const auto& collector : garbage_collectors_) {
if (collector->GetCollectorType() == collector_type_ &&
@@ -3566,11 +3550,9 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran,
target_size = bytes_allocated + delta * multiplier;
target_size = std::min(target_size, bytes_allocated + adjusted_max_free);
target_size = std::max(target_size, bytes_allocated + adjusted_min_free);
- native_need_to_run_finalization_ = true;
next_gc_type_ = collector::kGcTypeSticky;
} else {
- collector::GcType non_sticky_gc_type =
- HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
+ collector::GcType non_sticky_gc_type = NonStickyGcType();
// Find what the next non sticky collector will be.
collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
// If the throughput of the current sticky GC >= throughput of the non sticky collector, then
@@ -3721,7 +3703,7 @@ void Heap::ConcurrentGC(Thread* self, bool force_full) {
collector::GcType next_gc_type = next_gc_type_;
// If forcing full and next gc type is sticky, override with a non-sticky type.
if (force_full && next_gc_type == collector::kGcTypeSticky) {
- next_gc_type = HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
+ next_gc_type = NonStickyGcType();
}
if (CollectGarbageInternal(next_gc_type, kGcCauseBackground, false) ==
collector::kGcTypeNone) {
@@ -3878,70 +3860,79 @@ void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) {
}
void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) {
- Thread* self = ThreadForEnv(env);
- {
- MutexLock mu(self, native_histogram_lock_);
- native_allocation_histogram_.AddValue(bytes);
- }
- if (native_need_to_run_finalization_) {
- RunFinalization(env, kNativeAllocationFinalizeTimeout);
- UpdateMaxNativeFootprint();
- native_need_to_run_finalization_ = false;
- }
- // Total number of native bytes allocated.
- size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
- new_native_bytes_allocated += bytes;
- if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
- collector::GcType gc_type = HasZygoteSpace() ? collector::kGcTypePartial :
- collector::kGcTypeFull;
-
- // The second watermark is higher than the gc watermark. If you hit this it means you are
- // allocating native objects faster than the GC can keep up with.
- if (new_native_bytes_allocated > growth_limit_) {
- if (WaitForGcToComplete(kGcCauseForNativeAlloc, self) != collector::kGcTypeNone) {
- // Just finished a GC, attempt to run finalizers.
- RunFinalization(env, kNativeAllocationFinalizeTimeout);
- CHECK(!env->ExceptionCheck());
- // Native bytes allocated may be updated by finalization, refresh it.
- new_native_bytes_allocated = native_bytes_allocated_.LoadRelaxed();
- }
- // If we still are over the watermark, attempt a GC for alloc and run finalizers.
- if (new_native_bytes_allocated > growth_limit_) {
- CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
- RunFinalization(env, kNativeAllocationFinalizeTimeout);
- native_need_to_run_finalization_ = false;
- CHECK(!env->ExceptionCheck());
+ // See the REDESIGN section of go/understanding-register-native-allocation
+ // for an explanation of how RegisterNativeAllocation works.
+ size_t new_value = bytes + new_native_bytes_allocated_.FetchAndAddRelaxed(bytes);
+ if (new_value > NativeAllocationBlockingGcWatermark()) {
+ // Wait for a new GC to finish and finalizers to run, because the
+ // allocation rate is too high.
+ Thread* self = ThreadForEnv(env);
+
+ bool run_gc = false;
+ {
+ MutexLock mu(self, *native_blocking_gc_lock_);
+ uint32_t initial_gcs_finished = native_blocking_gcs_finished_;
+ if (native_blocking_gc_in_progress_) {
+ // A native blocking GC is in progress from the last time the native
+ // allocation blocking GC watermark was exceeded. Wait for that GC to
+ // finish before addressing the fact that we exceeded the blocking
+ // watermark again.
+ do {
+ native_blocking_gc_cond_->Wait(self);
+ } while (native_blocking_gcs_finished_ == initial_gcs_finished);
+ initial_gcs_finished++;
}
- // We have just run finalizers, update the native watermark since it is very likely that
- // finalizers released native managed allocations.
- UpdateMaxNativeFootprint();
- } else if (!IsGCRequestPending()) {
- if (IsGcConcurrent()) {
- RequestConcurrentGC(self, true); // Request non-sticky type.
+
+ // It's possible multiple threads have seen that we exceeded the
+ // blocking watermark. Ensure that only one of those threads runs the
+ // blocking GC. The rest of the threads should instead wait for the
+ // blocking GC to complete.
+ if (native_blocking_gc_in_progress_) {
+ do {
+ native_blocking_gc_cond_->Wait(self);
+ } while (native_blocking_gcs_finished_ == initial_gcs_finished);
} else {
- CollectGarbageInternal(gc_type, kGcCauseForNativeAlloc, false);
+ native_blocking_gc_in_progress_ = true;
+ run_gc = true;
}
}
+
+ if (run_gc) {
+ CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
+ RunFinalization(env, kNativeAllocationFinalizeTimeout);
+ CHECK(!env->ExceptionCheck());
+
+ MutexLock mu(self, *native_blocking_gc_lock_);
+ native_blocking_gc_in_progress_ = false;
+ native_blocking_gcs_finished_++;
+ native_blocking_gc_cond_->Broadcast(self);
+ }
+ } else if (new_value > NativeAllocationGcWatermark() && !IsGCRequestPending()) {
+ // Trigger another GC because there have been enough native bytes
+ // allocated since the last GC.
+ if (IsGcConcurrent()) {
+ RequestConcurrentGC(ThreadForEnv(env), /*force_full*/true);
+ } else {
+ CollectGarbageInternal(NonStickyGcType(), kGcCauseForNativeAlloc, false);
+ }
}
}
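
The wait loops above compare a generation counter (native_blocking_gcs_finished_) instead of re-testing a boolean, which makes them robust against spurious wakeups and guarantees each waiter observes at least one full GC completion. A sketch of the same generation-counter pattern in standard C++, with illustrative names:

    #include <condition_variable>
    #include <cstdint>
    #include <mutex>

    std::mutex gc_lock;
    std::condition_variable gc_cond;
    bool gc_in_progress = false;
    uint32_t gcs_finished = 0;  // Bumped once per completed blocking GC.

    // Returns true if the caller should run the GC itself; otherwise the
    // caller should WaitForOneGc() instead.
    bool TryStartGc() {
      std::lock_guard<std::mutex> lock(gc_lock);
      if (gc_in_progress) return false;
      gc_in_progress = true;
      return true;
    }

    // Block until at least one GC completes after this call begins.
    void WaitForOneGc() {
      std::unique_lock<std::mutex> lock(gc_lock);
      uint32_t initial = gcs_finished;
      while (gcs_finished == initial) {  // Re-check after every wakeup.
        gc_cond.wait(lock);
      }
    }

    // Called by the single thread that ran the GC.
    void FinishGc() {
      std::lock_guard<std::mutex> lock(gc_lock);
      gc_in_progress = false;
      ++gcs_finished;
      gc_cond.notify_all();  // Broadcast so every waiter re-checks the counter.
    }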
-void Heap::RegisterNativeFree(JNIEnv* env, size_t bytes) {
- size_t expected_size;
- {
- MutexLock mu(Thread::Current(), native_histogram_lock_);
- native_free_histogram_.AddValue(bytes);
- }
+void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) {
+ // Take the bytes freed out of new_native_bytes_allocated_ first. If
+ // new_native_bytes_allocated_ reaches zero, take the remaining bytes freed
+ // out of old_native_bytes_allocated_ to ensure all freed bytes are
+ // accounted for.
+ size_t allocated;
+ size_t new_freed_bytes;
do {
- expected_size = native_bytes_allocated_.LoadRelaxed();
- if (UNLIKELY(bytes > expected_size)) {
- ScopedObjectAccess soa(env);
- env->ThrowNew(WellKnownClasses::java_lang_RuntimeException,
- StringPrintf("Attempted to free %zd native bytes with only %zd native bytes "
- "registered as allocated", bytes, expected_size).c_str());
- break;
- }
- } while (!native_bytes_allocated_.CompareExchangeWeakRelaxed(expected_size,
- expected_size - bytes));
+ allocated = new_native_bytes_allocated_.LoadRelaxed();
+ new_freed_bytes = std::min(allocated, bytes);
+ } while (!new_native_bytes_allocated_.CompareExchangeWeakRelaxed(allocated,
+ allocated - new_freed_bytes));
+ if (new_freed_bytes < bytes) {
+ old_native_bytes_allocated_.FetchAndSubRelaxed(bytes - new_freed_bytes);
+ }
}
size_t Heap::GetTotalMemory() const {
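
RegisterNativeFree first drains the freed bytes from new_native_bytes_allocated_ with a CAS loop that clamps at zero, then charges any remainder to old_native_bytes_allocated_, so neither counter underflows and every freed byte is accounted for. The same drain-and-spill pattern as a self-contained sketch (illustrative names):

    #include <algorithm>
    #include <atomic>
    #include <cstddef>

    std::atomic<size_t> new_bytes{0};
    std::atomic<size_t> old_bytes{0};

    void RegisterFree(size_t bytes) {
      size_t allocated;
      size_t taken_from_new;
      do {
        allocated = new_bytes.load(std::memory_order_relaxed);
        taken_from_new = std::min(allocated, bytes);  // Clamp: never underflow.
      } while (!new_bytes.compare_exchange_weak(allocated,
                                                allocated - taken_from_new,
                                                std::memory_order_relaxed));
      if (taken_from_new < bytes) {
        // The rest was allocated before the last GC; subtract it there.
        old_bytes.fetch_sub(bytes - taken_from_new, std::memory_order_relaxed);
      }
    }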
diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h
index 3a8e29b08a..a4d300b110 100644
--- a/runtime/gc/heap.h
+++ b/runtime/gc/heap.h
@@ -260,9 +260,8 @@ class Heap {
REQUIRES_SHARED(Locks::mutator_lock_);
void RegisterNativeAllocation(JNIEnv* env, size_t bytes)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
- void RegisterNativeFree(JNIEnv* env, size_t bytes)
- REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !native_histogram_lock_);
+ REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*native_blocking_gc_lock_);
+ void RegisterNativeFree(JNIEnv* env, size_t bytes);
// Change the allocator, updates entrypoints.
void ChangeAllocator(AllocatorType allocator)
@@ -562,7 +561,7 @@ class Heap {
space::Space* FindSpaceFromAddress(const void* ptr) const
REQUIRES_SHARED(Locks::mutator_lock_);
- void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
+ void DumpForSigQuit(std::ostream& os) REQUIRES(!*gc_complete_lock_);
// Do a pending collector transition.
void DoPendingCollectorTransition() REQUIRES(!*gc_complete_lock_, !*pending_task_lock_);
@@ -679,7 +678,7 @@ class Heap {
// GC performance measuring
void DumpGcPerformanceInfo(std::ostream& os)
- REQUIRES(!*gc_complete_lock_, !native_histogram_lock_);
+ REQUIRES(!*gc_complete_lock_);
void ResetGcPerformanceInfo() REQUIRES(!*gc_complete_lock_);
// Thread pool.
@@ -979,10 +978,6 @@ class Heap {
void PostGcVerificationPaused(collector::GarbageCollector* gc)
REQUIRES(Locks::mutator_lock_, !*gc_complete_lock_);
- // Update the watermark for the native allocated bytes based on the current number of native
- // bytes allocated and the target utilization ratio.
- void UpdateMaxNativeFootprint();
-
// Find a collector based on GC type.
collector::GarbageCollector* FindCollectorByGcType(collector::GcType gc_type);
@@ -1066,6 +1061,31 @@ class Heap {
REQUIRES_SHARED(Locks::mutator_lock_)
REQUIRES(!*gc_complete_lock_, !*pending_task_lock_, !*backtrace_lock_);
+ collector::GcType NonStickyGcType() const {
+ return HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
+ }
+
+ // How large new_native_bytes_allocated_ can grow before we trigger a new
+ // GC.
+ ALWAYS_INLINE size_t NativeAllocationGcWatermark() const {
+ // Reuse max_free_ for the native allocation gc watermark, so that the
+ // native heap is treated in the same way as the Java heap in the case
+ // where the gc watermark update would exceed max_free_. Using max_free_
+ // instead of the target utilization means the watermark doesn't depend on
+ // the current number of registered native allocations.
+ return max_free_;
+ }
+
+ // How large new_native_bytes_allocated_ can grow while GC is in progress
+ // before we block the allocating thread to allow GC to catch up.
+ ALWAYS_INLINE size_t NativeAllocationBlockingGcWatermark() const {
+ // Historically the native allocations were bounded by growth_limit_. This
+ // uses that same value, dividing growth_limit_ by 2 to account for
+ // the fact that now the bound is relative to the number of retained
+ // registered native allocations rather than absolute.
+ return growth_limit_ / 2;
+ }
+
// All-known continuous spaces, where objects lie within fixed bounds.
std::vector<space::ContinuousSpace*> continuous_spaces_ GUARDED_BY(Locks::mutator_lock_);
@@ -1184,12 +1204,6 @@ class Heap {
// a GC should be triggered.
size_t max_allowed_footprint_;
- // The watermark at which a concurrent GC is requested by registerNativeAllocation.
- size_t native_footprint_gc_watermark_;
-
- // Whether or not we need to run finalizers in the next native allocation.
- bool native_need_to_run_finalization_;
-
// When num_bytes_allocated_ exceeds this amount then a concurrent GC should be requested so that
// it completes ahead of an allocation failing.
size_t concurrent_start_bytes_;
@@ -1203,13 +1217,25 @@ class Heap {
// Number of bytes allocated. Adjusted after each allocation and free.
Atomic<size_t> num_bytes_allocated_;
- // Bytes which are allocated and managed by native code but still need to be accounted for.
- Atomic<size_t> native_bytes_allocated_;
-
- // Native allocation stats.
- Mutex native_histogram_lock_;
- Histogram<uint64_t> native_allocation_histogram_;
- Histogram<uint64_t> native_free_histogram_;
+ // Number of registered native bytes allocated since the last time GC was
+ // triggered. Adjusted after each RegisterNativeAllocation and
+ // RegisterNativeFree. Used to determine when to trigger GC for native
+ // allocations.
+ // See the REDESIGN section of go/understanding-register-native-allocation.
+ Atomic<size_t> new_native_bytes_allocated_;
+
+ // Number of registered native bytes allocated prior to the last time GC was
+ // triggered, for debugging purposes. The current number of registered
+ // native bytes is determined by taking the sum of
+ // old_native_bytes_allocated_ and new_native_bytes_allocated_.
+ Atomic<size_t> old_native_bytes_allocated_;
+
+ // Used for synchronization of blocking GCs triggered by
+ // RegisterNativeAllocation.
+ Mutex* native_blocking_gc_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+ std::unique_ptr<ConditionVariable> native_blocking_gc_cond_ GUARDED_BY(native_blocking_gc_lock_);
+ bool native_blocking_gc_in_progress_ GUARDED_BY(native_blocking_gc_lock_);
+ uint32_t native_blocking_gcs_finished_ GUARDED_BY(native_blocking_gc_lock_);
// Number of bytes freed by thread local buffer revokes. This will
// cancel out the ahead-of-time bulk counting of bytes allocated in
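
With these definitions both thresholds fall out of existing heap tunables: a background GC is requested once max_free_ bytes of new native allocations accumulate, and allocating threads block once half of growth_limit_ accumulates. A quick arithmetic sketch under assumed values (not taken from this change):

    #include <cstddef>
    #include <iostream>

    int main() {
      const size_t MB = 1024 * 1024;
      const size_t growth_limit = 256 * MB;  // Assumed device configuration.
      const size_t max_free = 8 * MB;        // Assumed device configuration.

      const size_t gc_watermark = max_free;                // NativeAllocationGcWatermark()
      const size_t blocking_watermark = growth_limit / 2;  // NativeAllocationBlockingGcWatermark()

      std::cout << "Request background GC after " << gc_watermark / MB
                << " MiB of new native allocations; block allocators after "
                << blocking_watermark / MB << " MiB.\n";
      return 0;
    }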
diff --git a/runtime/monitor.cc b/runtime/monitor.cc
index 0ceb23a7a2..a32003e81b 100644
--- a/runtime/monitor.cc
+++ b/runtime/monitor.cc
@@ -356,40 +356,44 @@ void Monitor::Lock(Thread* self) {
// Do this before releasing the lock so that we don't get deflated.
size_t num_waiters = num_waiters_;
++num_waiters_;
+
+  // If systrace logging is enabled, gather the contention info for the lock owner here, before
+  // releasing the monitor's lock: acquiring the monitor's lock and then re-acquiring the mutator
+  // lock (as the old code below did) can deadlock.
+ bool started_trace = false;
+ if (ATRACE_ENABLED()) {
+ if (owner_ != nullptr) { // Did the owner_ give the lock up?
+ std::ostringstream oss;
+ std::string name;
+ owner_->GetThreadName(name);
+ oss << PrettyContentionInfo(name,
+ owner_->GetTid(),
+ owners_method,
+ owners_dex_pc,
+ num_waiters);
+ // Add info for contending thread.
+ uint32_t pc;
+ ArtMethod* m = self->GetCurrentMethod(&pc);
+ const char* filename;
+ int32_t line_number;
+ TranslateLocation(m, pc, &filename, &line_number);
+ oss << " blocking from "
+ << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
+ << ":" << line_number << ")";
+ ATRACE_BEGIN(oss.str().c_str());
+ started_trace = true;
+ }
+ }
+
monitor_lock_.Unlock(self); // Let go of locks in order.
self->SetMonitorEnterObject(GetObject());
{
- uint32_t original_owner_thread_id = 0u;
ScopedThreadSuspension tsc(self, kBlocked); // Change to blocked and give up mutator_lock_.
+ uint32_t original_owner_thread_id = 0u;
{
// Reacquire monitor_lock_ without mutator_lock_ for Wait.
MutexLock mu2(self, monitor_lock_);
if (owner_ != nullptr) { // Did the owner_ give the lock up?
original_owner_thread_id = owner_->GetThreadId();
- if (ATRACE_ENABLED()) {
- std::ostringstream oss;
- {
- // Reacquire mutator_lock_ for getting the location info.
- ScopedObjectAccess soa(self);
- std::string name;
- owner_->GetThreadName(name);
- oss << PrettyContentionInfo(name,
- owner_->GetTid(),
- owners_method,
- owners_dex_pc,
- num_waiters);
- // Add info for contending thread.
- uint32_t pc;
- ArtMethod* m = self->GetCurrentMethod(&pc);
- const char* filename;
- int32_t line_number;
- TranslateLocation(m, pc, &filename, &line_number);
- oss << " blocking from "
- << ArtMethod::PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null")
- << ":" << line_number << ")";
- }
- ATRACE_BEGIN(oss.str().c_str());
- }
monitor_contenders_.Wait(self); // Still contended so wait.
}
}
@@ -448,9 +452,11 @@ void Monitor::Lock(Thread* self) {
}
}
}
- ATRACE_END();
}
}
+ if (started_trace) {
+ ATRACE_END();
+ }
self->SetMonitorEnterObject(nullptr);
monitor_lock_.Lock(self); // Reacquire locks in order.
--num_waiters_;
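
Moving the systrace bookkeeping before monitor_lock_ is released means ATRACE_END must only run when a matching ATRACE_BEGIN actually fired, hence the started_trace flag. The same begin/end pairing could be captured by a small RAII wrapper; a hypothetical sketch, not part of this change:

    #include <string>

    // Inline stubs standing in for the ATRACE_ENABLED/ATRACE_BEGIN/ATRACE_END macros.
    inline bool TraceEnabled() { return true; }
    inline void TraceBegin(const std::string& /*label*/) {}
    inline void TraceEnd() {}

    class ScopedContentionTrace {
     public:
      explicit ScopedContentionTrace(const std::string& label) {
        if (TraceEnabled()) {
          TraceBegin(label);
          started_ = true;  // Remember that a section was opened.
        }
      }
      ~ScopedContentionTrace() {
        if (started_) {
          TraceEnd();  // Only close a section we actually opened.
        }
      }
     private:
      bool started_ = false;
    };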
diff --git a/runtime/openjdkjvmti/ti_heap.cc b/runtime/openjdkjvmti/ti_heap.cc
index 7b2521d63a..fe3e52b0c1 100644
--- a/runtime/openjdkjvmti/ti_heap.cc
+++ b/runtime/openjdkjvmti/ti_heap.cc
@@ -303,11 +303,11 @@ class FollowReferencesHelper FINAL {
art::Thread* thread = FindThread(info);
if (thread != nullptr) {
- art::mirror::Object* thread_obj = thread->GetPeer();
+ art::mirror::Object* thread_obj;
if (thread->IsStillStarting()) {
thread_obj = nullptr;
} else {
- thread_obj = thread->GetPeer();
+ thread_obj = thread->GetPeerFromOtherThread();
}
if (thread_obj != nullptr) {
ref_info->jni_local.thread_tag = tag_table_->GetTagOrZero(thread_obj);
@@ -333,11 +333,11 @@ class FollowReferencesHelper FINAL {
art::Thread* thread = FindThread(info);
if (thread != nullptr) {
- art::mirror::Object* thread_obj = thread->GetPeer();
+ art::mirror::Object* thread_obj;
if (thread->IsStillStarting()) {
thread_obj = nullptr;
} else {
- thread_obj = thread->GetPeer();
+ thread_obj = thread->GetPeerFromOtherThread();
}
if (thread_obj != nullptr) {
ref_info->stack_local.thread_tag = tag_table_->GetTagOrZero(thread_obj);
diff --git a/runtime/openjdkjvmti/ti_redefine.cc b/runtime/openjdkjvmti/ti_redefine.cc
index b7257f8994..058b93a042 100644
--- a/runtime/openjdkjvmti/ti_redefine.cc
+++ b/runtime/openjdkjvmti/ti_redefine.cc
@@ -48,7 +48,7 @@
#include "jit/jit_code_cache.h"
#include "jni_env_ext-inl.h"
#include "jvmti_allocator.h"
-#include "mirror/class.h"
+#include "mirror/class-inl.h"
#include "mirror/class_ext.h"
#include "mirror/object.h"
#include "object_lock.h"
@@ -491,6 +491,143 @@ void Redefiner::ClassRedefinition::FillObsoleteMethodMap(
}
}
+// Try to get the declared method. First look for a virtual method, then fall back to a direct
+// method if no virtual method is found.
+static art::ArtMethod* FindMethod(art::Handle<art::mirror::Class> klass,
+ const char* name,
+ art::Signature sig) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+ art::ArtMethod* m = klass->FindDeclaredVirtualMethod(name, sig, art::kRuntimePointerSize);
+ if (m == nullptr) {
+ m = klass->FindDeclaredDirectMethod(name, sig, art::kRuntimePointerSize);
+ }
+ return m;
+}
+
+bool Redefiner::ClassRedefinition::CheckSameMethods() {
+ art::StackHandleScope<1> hs(driver_->self_);
+ art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass()));
+ DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
+
+ art::ClassDataItemIterator new_iter(*dex_file_,
+ dex_file_->GetClassData(dex_file_->GetClassDef(0)));
+
+ // Make sure we have the same number of methods.
+ uint32_t num_new_method = new_iter.NumVirtualMethods() + new_iter.NumDirectMethods();
+ uint32_t num_old_method = h_klass->GetDeclaredMethodsSlice(art::kRuntimePointerSize).size();
+ if (num_new_method != num_old_method) {
+ bool bigger = num_new_method > num_old_method;
+ RecordFailure(bigger ? ERR(UNSUPPORTED_REDEFINITION_METHOD_ADDED)
+ : ERR(UNSUPPORTED_REDEFINITION_METHOD_DELETED),
+ StringPrintf("Total number of declared methods changed from %d to %d",
+ num_old_method, num_new_method));
+ return false;
+ }
+
+ // Skip all of the fields. We should have already checked this.
+ while (new_iter.HasNextStaticField() || new_iter.HasNextInstanceField()) {
+ new_iter.Next();
+ }
+  // Check each of the methods. NB we don't need to specifically check for removals since the two
+  // dex files have the same number of methods, which means there must be an equal number of
+  // additions and removals.
+ for (; new_iter.HasNextVirtualMethod() || new_iter.HasNextDirectMethod(); new_iter.Next()) {
+ // Get the data on the method we are searching for
+ const art::DexFile::MethodId& new_method_id = dex_file_->GetMethodId(new_iter.GetMemberIndex());
+ const char* new_method_name = dex_file_->GetMethodName(new_method_id);
+ art::Signature new_method_signature = dex_file_->GetMethodSignature(new_method_id);
+ art::ArtMethod* old_method = FindMethod(h_klass, new_method_name, new_method_signature);
+    // If we got past the check for the same number of methods above, there must be at least one
+    // added and one removed method. We return the ADDED failure message since it is easier to get
+    // a useful error report for it.
+ if (old_method == nullptr) {
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_METHOD_ADDED),
+ StringPrintf("Unknown method '%s' (sig: %s) was added!",
+ new_method_name,
+ new_method_signature.ToString().c_str()));
+ return false;
+ }
+ // Since direct methods have different flags than virtual ones (specifically direct methods must
+ // have kAccPrivate or kAccStatic or kAccConstructor flags) we can tell if a method changes from
+ // virtual to direct.
+ uint32_t new_flags = new_iter.GetMethodAccessFlags();
+ if (new_flags != (old_method->GetAccessFlags() & art::kAccValidMethodFlags)) {
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_METHOD_MODIFIERS_CHANGED),
+ StringPrintf("method '%s' (sig: %s) had different access flags",
+ new_method_name,
+ new_method_signature.ToString().c_str()));
+ return false;
+ }
+ }
+ return true;
+}
+
+bool Redefiner::ClassRedefinition::CheckSameFields() {
+ art::StackHandleScope<1> hs(driver_->self_);
+ art::Handle<art::mirror::Class> h_klass(hs.NewHandle(GetMirrorClass()));
+ DCHECK_EQ(dex_file_->NumClassDefs(), 1u);
+ art::ClassDataItemIterator new_iter(*dex_file_,
+ dex_file_->GetClassData(dex_file_->GetClassDef(0)));
+ const art::DexFile& old_dex_file = h_klass->GetDexFile();
+ art::ClassDataItemIterator old_iter(old_dex_file,
+ old_dex_file.GetClassData(*h_klass->GetClassDef()));
+ // Instance and static fields can be differentiated by their flags so no need to check them
+ // separately.
+ while (new_iter.HasNextInstanceField() || new_iter.HasNextStaticField()) {
+    // Get the data on the field we are searching for
+ const art::DexFile::FieldId& new_field_id = dex_file_->GetFieldId(new_iter.GetMemberIndex());
+ const char* new_field_name = dex_file_->GetFieldName(new_field_id);
+ const char* new_field_type = dex_file_->GetFieldTypeDescriptor(new_field_id);
+
+ if (!(old_iter.HasNextInstanceField() || old_iter.HasNextStaticField())) {
+      // We are missing the old version of this field!
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+ StringPrintf("Unknown field '%s' (type: %s) added!",
+ new_field_name,
+ new_field_type));
+ return false;
+ }
+
+ const art::DexFile::FieldId& old_field_id = old_dex_file.GetFieldId(old_iter.GetMemberIndex());
+ const char* old_field_name = old_dex_file.GetFieldName(old_field_id);
+ const char* old_field_type = old_dex_file.GetFieldTypeDescriptor(old_field_id);
+
+ // Check name and type.
+ if (strcmp(old_field_name, new_field_name) != 0 ||
+ strcmp(old_field_type, new_field_type) != 0) {
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+ StringPrintf("Field changed from '%s' (sig: %s) to '%s' (sig: %s)!",
+ old_field_name,
+ old_field_type,
+ new_field_name,
+ new_field_type));
+ return false;
+ }
+
+ // Since static fields have different flags than instance ones (specifically static fields must
+ // have the kAccStatic flag) we can tell if a field changes from static to instance.
+ if (new_iter.GetFieldAccessFlags() != old_iter.GetFieldAccessFlags()) {
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+ StringPrintf("Field '%s' (sig: %s) had different access flags",
+ new_field_name,
+ new_field_type));
+ return false;
+ }
+
+ new_iter.Next();
+ old_iter.Next();
+ }
+ if (old_iter.HasNextInstanceField() || old_iter.HasNextStaticField()) {
+ RecordFailure(ERR(UNSUPPORTED_REDEFINITION_SCHEMA_CHANGED),
+ StringPrintf("field '%s' (sig: %s) is missing!",
+ old_dex_file.GetFieldName(old_dex_file.GetFieldId(
+ old_iter.GetMemberIndex())),
+ old_dex_file.GetFieldTypeDescriptor(old_dex_file.GetFieldId(
+ old_iter.GetMemberIndex()))));
+ return false;
+ }
+ return true;
+}
+
bool Redefiner::ClassRedefinition::CheckClass() {
// TODO Might just want to put it in a ObjPtr and NoSuspend assert.
art::StackHandleScope<1> hs(driver_->self_);
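
CheckSameMethods relies on a counting argument: once the method counts match, showing that every method in the new dex file resolves in the old class rules out additions and removals alike. Reduced to plain string signatures, the reasoning looks like this (an illustrative sketch, far simpler than the ClassDataItemIterator walk above):

    #include <string>
    #include <unordered_set>
    #include <vector>

    // True if new_methods neither adds nor removes entries relative to
    // old_methods. Equal sizes plus "every new entry exists in old" together
    // exclude both additions and removals. Name+signature pairs are unique
    // within a class, so a set (rather than a multiset) suffices.
    bool SameMethods(const std::vector<std::string>& old_methods,
                     const std::vector<std::string>& new_methods) {
      if (old_methods.size() != new_methods.size()) {
        return false;  // Net additions or deletions.
      }
      const std::unordered_set<std::string> old_set(old_methods.begin(),
                                                    old_methods.end());
      for (const std::string& m : new_methods) {
        if (old_set.count(m) == 0) {
          return false;  // An unknown method implies one added and one removed.
        }
      }
      return true;
    }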
diff --git a/runtime/openjdkjvmti/ti_redefine.h b/runtime/openjdkjvmti/ti_redefine.h
index 5aa7dde55c..3209abbe64 100644
--- a/runtime/openjdkjvmti/ti_redefine.h
+++ b/runtime/openjdkjvmti/ti_redefine.h
@@ -177,17 +177,11 @@ class Redefiner {
// Checks that the class can even be redefined.
bool CheckRedefinable() REQUIRES_SHARED(art::Locks::mutator_lock_);
- // Checks that the dex file does not add/remove methods.
- bool CheckSameMethods() REQUIRES_SHARED(art::Locks::mutator_lock_) {
- LOG(WARNING) << "methods are not checked for modification currently";
- return true;
- }
+ // Checks that the dex file does not add/remove methods, or change their modifiers or types.
+ bool CheckSameMethods() REQUIRES_SHARED(art::Locks::mutator_lock_);
- // Checks that the dex file does not modify fields
- bool CheckSameFields() REQUIRES_SHARED(art::Locks::mutator_lock_) {
- LOG(WARNING) << "Fields are not checked for modification currently";
- return true;
- }
+  // Checks that the dex file does not modify field types or modifiers.
+ bool CheckSameFields() REQUIRES_SHARED(art::Locks::mutator_lock_);
void UpdateJavaDexFile(art::ObjPtr<art::mirror::Object> java_dex_file,
art::ObjPtr<art::mirror::LongArray> new_cookie)
diff --git a/runtime/openjdkjvmti/ti_stack.cc b/runtime/openjdkjvmti/ti_stack.cc
index 4cf55a6a98..b5a6c6e1ee 100644
--- a/runtime/openjdkjvmti/ti_stack.cc
+++ b/runtime/openjdkjvmti/ti_stack.cc
@@ -377,7 +377,8 @@ jvmtiError StackUtil::GetAllStackTraces(jvmtiEnv* env,
jvmtiStackInfo& old_stack_info = stack_info_array.get()[i];
jvmtiStackInfo& new_stack_info = stack_info[i];
- jthread thread_peer = current->GetJniEnv()->AddLocalReference<jthread>(threads[i]->GetPeer());
+ jthread thread_peer = current->GetJniEnv()->AddLocalReference<jthread>(
+ threads[i]->GetPeerFromOtherThread());
new_stack_info.thread = thread_peer;
if (old_stack_info.frame_count > 0) {
@@ -453,7 +454,7 @@ jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
}
// Get the peer, and check whether we know it.
- art::ObjPtr<art::mirror::Object> peer = thread->GetPeer();
+ art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
for (size_t index = 0; index != handles.size(); ++index) {
if (peer == handles[index].Get()) {
// Found the thread.
diff --git a/runtime/openjdkjvmti/ti_thread.cc b/runtime/openjdkjvmti/ti_thread.cc
index b18a5cd746..00d4144415 100644
--- a/runtime/openjdkjvmti/ti_thread.cc
+++ b/runtime/openjdkjvmti/ti_thread.cc
@@ -200,7 +200,7 @@ jvmtiError ThreadUtil::GetThreadInfo(jvmtiEnv* env, jthread thread, jvmtiThreadI
info_ptr->is_daemon = self->IsDaemon();
- art::ObjPtr<art::mirror::Object> peer = self->GetPeer();
+ art::ObjPtr<art::mirror::Object> peer = self->GetPeerFromOtherThread();
// ThreadGroup.
if (peer != nullptr) {
@@ -458,7 +458,7 @@ jvmtiError ThreadUtil::GetAllThreads(jvmtiEnv* env,
continue;
}
- art::ObjPtr<art::mirror::Object> peer = thread->GetPeer();
+ art::ObjPtr<art::mirror::Object> peer = thread->GetPeerFromOtherThread();
if (peer != nullptr) {
peers.push_back(peer);
}
diff --git a/runtime/openjdkjvmti/ti_threadgroup.cc b/runtime/openjdkjvmti/ti_threadgroup.cc
index 35b1bfd920..e63ce6576a 100644
--- a/runtime/openjdkjvmti/ti_threadgroup.cc
+++ b/runtime/openjdkjvmti/ti_threadgroup.cc
@@ -174,7 +174,7 @@ static void GetThreads(art::Handle<art::mirror::Object> thread_group,
if (t->IsStillStarting()) {
continue;
}
- art::ObjPtr<art::mirror::Object> peer = t->GetPeer();
+ art::ObjPtr<art::mirror::Object> peer = t->GetPeerFromOtherThread();
if (peer == nullptr) {
continue;
}
diff --git a/runtime/stack.cc b/runtime/stack.cc
index 6e0569bb5d..c737fe49ea 100644
--- a/runtime/stack.cc
+++ b/runtime/stack.cc
@@ -96,13 +96,17 @@ bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_fr
return false;
}
-StackVisitor::StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
- : StackVisitor(thread, context, walk_kind, 0) {}
+StackVisitor::StackVisitor(Thread* thread,
+ Context* context,
+ StackWalkKind walk_kind,
+ bool check_suspended)
+ : StackVisitor(thread, context, walk_kind, 0, check_suspended) {}
StackVisitor::StackVisitor(Thread* thread,
Context* context,
StackWalkKind walk_kind,
- size_t num_frames)
+ size_t num_frames,
+ bool check_suspended)
: thread_(thread),
walk_kind_(walk_kind),
cur_shadow_frame_(nullptr),
@@ -112,8 +116,11 @@ StackVisitor::StackVisitor(Thread* thread,
num_frames_(num_frames),
cur_depth_(0),
current_inlining_depth_(0),
- context_(context) {
- DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
+ context_(context),
+ check_suspended_(check_suspended) {
+ if (check_suspended_) {
+ DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
+ }
}
InlineInfo StackVisitor::GetCurrentInlineInfo() const {
@@ -788,7 +795,9 @@ QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
template <StackVisitor::CountTransitions kCount>
void StackVisitor::WalkStack(bool include_transitions) {
- DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
+ if (check_suspended_) {
+ DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
+ }
CHECK_EQ(cur_depth_, 0U);
bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
uint32_t instrumentation_stack_depth = 0;
diff --git a/runtime/stack.h b/runtime/stack.h
index 9dceb2931d..90a0aee353 100644
--- a/runtime/stack.h
+++ b/runtime/stack.h
@@ -590,7 +590,10 @@ class StackVisitor {
};
protected:
- StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind);
+ StackVisitor(Thread* thread,
+ Context* context,
+ StackWalkKind walk_kind,
+ bool check_suspended = true);
bool GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -797,7 +800,11 @@ class StackVisitor {
private:
// Private constructor known in the case that num_frames_ has already been computed.
- StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind, size_t num_frames)
+ StackVisitor(Thread* thread,
+ Context* context,
+ StackWalkKind walk_kind,
+ size_t num_frames,
+ bool check_suspended = true)
REQUIRES_SHARED(Locks::mutator_lock_);
bool IsAccessibleRegister(uint32_t reg, bool is_float) const {
@@ -851,6 +858,7 @@ class StackVisitor {
protected:
Context* const context_;
+ const bool check_suspended_;
};
} // namespace art
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 632a380bf0..7b6540436a 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -65,6 +65,7 @@
#include "object_lock.h"
#include "quick_exception_handler.h"
#include "quick/quick_method_frame_info.h"
+#include "read_barrier-inl.h"
#include "reflection.h"
#include "runtime.h"
#include "runtime_callbacks.h"
@@ -1583,15 +1584,24 @@ void Thread::DumpState(std::ostream& os) const {
}
struct StackDumpVisitor : public StackVisitor {
- StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
+ StackDumpVisitor(std::ostream& os_in,
+ Thread* thread_in,
+ Context* context,
+ bool can_allocate_in,
+ bool check_suspended = true,
+ bool dump_locks_in = true)
REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ : StackVisitor(thread_in,
+ context,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+ check_suspended),
os(os_in),
can_allocate(can_allocate_in),
last_method(nullptr),
last_line_number(0),
repetition_count(0),
- frame_count(0) {}
+ frame_count(0),
+ dump_locks(dump_locks_in) {}
virtual ~StackDumpVisitor() {
if (frame_count == 0) {
@@ -1636,8 +1646,10 @@ struct StackDumpVisitor : public StackVisitor {
if (frame_count == 0) {
Monitor::DescribeWait(os, GetThread());
}
- if (can_allocate) {
+ if (can_allocate && dump_locks) {
// Visit locks, but do not abort on errors. This would trigger a nested abort.
+      // Skip visiting locks if dump_locks is false, as visiting them would trigger a
+      // bad_mutexes_held failure in RegTypeCache::RegTypeCache due to thread_list_lock.
Monitor::VisitLocks(this, DumpLockedObject, &os, false);
}
}
@@ -1681,6 +1693,7 @@ struct StackDumpVisitor : public StackVisitor {
int last_line_number;
int repetition_count;
int frame_count;
+ const bool dump_locks;
};
static bool ShouldShowNativeStack(const Thread* thread)
@@ -1712,7 +1725,7 @@ static bool ShouldShowNativeStack(const Thread* thread)
return current_method != nullptr && current_method->IsNative();
}
-void Thread::DumpJavaStack(std::ostream& os) const {
+void Thread::DumpJavaStack(std::ostream& os, bool check_suspended, bool dump_locks) const {
// If flip_function is not null, it means we have run a checkpoint
// before the thread wakes up to execute the flip function and the
// thread roots haven't been forwarded. So the following access to
@@ -1741,7 +1754,7 @@ void Thread::DumpJavaStack(std::ostream& os) const {
std::unique_ptr<Context> context(Context::Create());
StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
- !tls32_.throwing_OutOfMemoryError);
+ !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks);
dumper.WalkStack();
if (have_exception) {
@@ -1767,10 +1780,15 @@ void Thread::DumpStack(std::ostream& os,
// If we're currently in native code, dump that stack before dumping the managed stack.
if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) {
DumpKernelStack(os, GetTid(), " kernel: ", false);
- ArtMethod* method = GetCurrentMethod(nullptr, !(dump_for_abort || force_dump_stack));
+ ArtMethod* method =
+ GetCurrentMethod(nullptr,
+ /*check_suspended*/ !force_dump_stack,
+ /*abort_on_error*/ !(dump_for_abort || force_dump_stack));
DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method);
}
- DumpJavaStack(os);
+ DumpJavaStack(os,
+ /*check_suspended*/ !force_dump_stack,
+ /*dump_locks*/ !force_dump_stack);
} else {
os << "Not able to dump stack of thread that isn't suspended";
}
@@ -1845,6 +1863,7 @@ Thread::Thread(bool daemon)
: tls32_(daemon),
wait_monitor_(nullptr),
interrupted_(false),
+ custom_tls_(nullptr),
can_call_into_java_(true) {
wait_mutex_ = new Mutex("a thread wait mutex");
wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
@@ -2918,9 +2937,12 @@ Context* Thread::GetLongJumpContext() {
// Note: this visitor may return with a method set, but dex_pc_ being DexFile:kDexNoIndex. This is
// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
struct CurrentMethodVisitor FINAL : public StackVisitor {
- CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
+ CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error)
REQUIRES_SHARED(Locks::mutator_lock_)
- : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
+ : StackVisitor(thread,
+ context,
+ StackVisitor::StackWalkKind::kIncludeInlinedFrames,
+ check_suspended),
this_object_(nullptr),
method_(nullptr),
dex_pc_(0),
@@ -2944,8 +2966,13 @@ struct CurrentMethodVisitor FINAL : public StackVisitor {
const bool abort_on_error_;
};
-ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
- CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error);
+ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc,
+ bool check_suspended,
+ bool abort_on_error) const {
+ CurrentMethodVisitor visitor(const_cast<Thread*>(this),
+ nullptr,
+ check_suspended,
+ abort_on_error);
visitor.WalkStack(false);
if (dex_pc != nullptr) {
*dex_pc = visitor.dex_pc_;
@@ -3457,4 +3484,15 @@ bool Thread::IsAotCompiler() {
return Runtime::Current()->IsAotCompiler();
}
+mirror::Object* Thread::GetPeerFromOtherThread() const {
+ mirror::Object* peer = GetPeer();
+ if (kUseReadBarrier && Current()->GetIsGcMarking()) {
+    // We may call Thread::Dump() in the middle of the CC thread flip, in which case this
+    // thread's stack may not have been flipped yet and the peer may be a from-space (stale)
+    // reference. So explicitly mark/forward it here.
+ peer = art::ReadBarrier::Mark(peer);
+ }
+ return peer;
+}
+
} // namespace art
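
GetPeerFromOtherThread generalizes to any reference read from a thread whose stack may not have been flipped yet: while the concurrent-copying collector is marking, forward the possibly stale reference through the read barrier before use. A stripped-down sketch of that conditional-forwarding pattern, with stubs standing in for the ART types:

    struct Object {};

    // Stubs standing in for Thread::GetIsGcMarking() and ReadBarrier::Mark().
    bool GcIsMarking() { return false; }
    Object* Mark(Object* ref) { return ref; }  // Would return the to-space copy.

    Object* ReadPossiblyStaleRef(Object* ref) {
      if (GcIsMarking()) {
        ref = Mark(ref);  // Forward a from-space reference to its to-space copy.
      }
      return ref;
    }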
diff --git a/runtime/thread.h b/runtime/thread.h
index b59eac68e9..3a1b7da406 100644
--- a/runtime/thread.h
+++ b/runtime/thread.h
@@ -201,7 +201,9 @@ class Thread {
REQUIRES(!Locks::thread_suspend_count_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
- void DumpJavaStack(std::ostream& os) const
+ void DumpJavaStack(std::ostream& os,
+ bool check_suspended = true,
+ bool dump_locks = true) const
REQUIRES(!Locks::thread_suspend_count_lock_)
REQUIRES_SHARED(Locks::mutator_lock_);
@@ -359,6 +361,10 @@ class Thread {
CHECK(tlsPtr_.jpeer == nullptr);
return tlsPtr_.opeer;
}
+  // GetPeer is not safe to call on another thread in the middle of the CC thread flip, since
+  // the thread's stack may not have been flipped yet and the peer may be a from-space (stale)
+  // reference. This function explicitly marks/forwards it.
+ mirror::Object* GetPeerFromOtherThread() const REQUIRES_SHARED(Locks::mutator_lock_);
bool HasPeer() const {
return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
@@ -411,7 +417,9 @@ class Thread {
// Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
// abort the runtime iff abort_on_error is true.
- ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
+ ArtMethod* GetCurrentMethod(uint32_t* dex_pc,
+ bool check_suspended = true,
+ bool abort_on_error = true) const
REQUIRES_SHARED(Locks::mutator_lock_);
// Returns whether the given exception was thrown by the current Java method being executed