-rw-r--r--  build/Android.common_test.mk                  2
-rw-r--r--  cmdline/cmdline_parser.h                      4
-rw-r--r--  runtime/gc/collector/concurrent_copying.cc   88
-rw-r--r--  runtime/gc/collector/concurrent_copying.h     6
-rw-r--r--  runtime/gc/heap.cc                            5
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc   5
-rw-r--r--  runtime/thread.cc                            12
-rw-r--r--  runtime/trace.cc                            100
-rw-r--r--  runtime/trace.h                               3
-rw-r--r--  tools/libcore_failures.txt                   24
-rw-r--r--  tools/timeout_dumper/timeout_dumper.cc      128
11 files changed, 219 insertions, 158 deletions
diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk
index d8014bd55f..be1791b372 100644
--- a/build/Android.common_test.mk
+++ b/build/Android.common_test.mk
@@ -133,6 +133,7 @@ define build-art-test-dex
LOCAL_MODULE_PATH := $(3)
LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT)
ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
+ LOCAL_MIN_SDK_VERSION := 19
LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
endif
include $(BUILD_JAVA_LIBRARY)
@@ -148,6 +149,7 @@ define build-art-test-dex
LOCAL_JAVA_LIBRARIES := $(HOST_TEST_CORE_JARS)
LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION)
ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),)
+ LOCAL_MIN_SDK_VERSION := 19
LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex
endif
include $(BUILD_HOST_DALVIK_JAVA_LIBRARY)
diff --git a/cmdline/cmdline_parser.h b/cmdline/cmdline_parser.h
index 82c04e70f5..952be44b0e 100644
--- a/cmdline/cmdline_parser.h
+++ b/cmdline/cmdline_parser.h
@@ -206,7 +206,7 @@ struct CmdlineParser {
};
load_value_ = []() -> TArg& {
assert(false && "Should not be appending values to ignored arguments");
- return *reinterpret_cast<TArg*>(0); // Blow up.
+ __builtin_trap(); // Blow up.
};
save_value_specified_ = true;
@@ -270,7 +270,7 @@ struct CmdlineParser {
load_value_ = []() -> TArg& {
assert(false && "No load value function defined");
- return *reinterpret_cast<TArg*>(0); // Blow up.
+ __builtin_trap(); // Blow up.
};
}
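
The cmdline_parser.h hunks above replace a fabricated null reference with __builtin_trap(). The lambda must type-check as returning a TArg&, but this error path must never execute; dereferencing a null pointer to manufacture the reference is undefined behavior, whereas __builtin_trap() (a noreturn GCC/Clang builtin) aborts immediately and needs no return value. A minimal standalone sketch of the pattern, not ART code:

#include <cassert>

struct Arg { int value; };

// Must type-check as returning Arg&, but this path must never run. Since
// __builtin_trap() is noreturn, the compiler accepts the missing return
// statement, and no UB null dereference is needed.
Arg& LoadIgnoredValue() {
  assert(false && "Should not be appending values to ignored arguments");
  __builtin_trap();  // Well-defined immediate abort (GCC/Clang builtin).
}

int main() {
  return 0;  // Calling LoadIgnoredValue() would trap; this only shows it compiles.
}
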
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc
index f7f3a8d628..3b57b0741d 100644
--- a/runtime/gc/collector/concurrent_copying.cc
+++ b/runtime/gc/collector/concurrent_copying.cc
@@ -95,7 +95,8 @@ ConcurrentCopying::ConcurrentCopying(Heap* heap,
weak_ref_access_enabled_(true),
copied_live_bytes_ratio_sum_(0.f),
gc_count_(0),
- inter_region_bitmap_(nullptr),
+ region_space_inter_region_bitmap_(nullptr),
+ non_moving_space_inter_region_bitmap_(nullptr),
reclaimed_bytes_ratio_sum_(0.f),
young_gen_(young_gen),
skipped_blocks_lock_("concurrent copying bytes blocks lock", kMarkSweepMarkStackLock),
@@ -286,12 +287,32 @@ void ConcurrentCopying::ActivateReadBarrierEntrypoints() {
gc_barrier_->Increment(self, barrier_count);
}
+void ConcurrentCopying::CreateInterRegionRefBitmaps() {
+ DCHECK(kEnableGenerationalConcurrentCopyingCollection);
+ DCHECK(region_space_inter_region_bitmap_ == nullptr);
+ DCHECK(non_moving_space_inter_region_bitmap_ == nullptr);
+ DCHECK(region_space_ != nullptr);
+ DCHECK(heap_->non_moving_space_ != nullptr);
+ // Region-space
+ region_space_inter_region_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
+ "region-space inter region ref bitmap",
+ reinterpret_cast<uint8_t*>(region_space_->Begin()),
+ region_space_->Limit() - region_space_->Begin()));
+ CHECK(region_space_inter_region_bitmap_ != nullptr)
+ << "Couldn't allocate region-space inter region ref bitmap";
+
+ // non-moving-space
+ non_moving_space_inter_region_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
+ "non-moving-space inter region ref bitmap",
+ reinterpret_cast<uint8_t*>(heap_->non_moving_space_->Begin()),
+ heap_->non_moving_space_->Limit() - heap_->non_moving_space_->Begin()));
+ CHECK(non_moving_space_inter_region_bitmap_ != nullptr)
+ << "Couldn't allocate non-moving-space inter region ref bitmap";
+}
+
void ConcurrentCopying::BindBitmaps() {
Thread* self = Thread::Current();
WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
- uintptr_t continuous_spaces_begin = UINTPTR_MAX;
- uintptr_t continuous_spaces_limit = 0;
- DCHECK(inter_region_bitmap_ == nullptr);
// Mark all of the spaces we never collect as immune.
for (const auto& space : heap_->GetContinuousSpaces()) {
if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect ||
@@ -301,6 +322,7 @@ void ConcurrentCopying::BindBitmaps() {
} else {
CHECK(!space->IsZygoteSpace());
CHECK(!space->IsImageSpace());
+ CHECK(space == region_space_ || space == heap_->non_moving_space_);
if (kEnableGenerationalConcurrentCopyingCollection) {
if (space == region_space_) {
region_space_bitmap_ = region_space_->GetMarkBitmap();
@@ -323,11 +345,6 @@ void ConcurrentCopying::BindBitmaps() {
// be captured after the thread-flip of this GC cycle, as that is when
// the young-gen for the next GC cycle starts getting populated.
heap_->GetCardTable()->ClearCardRange(space->Begin(), space->Limit());
-
- continuous_spaces_begin =
- std::min(continuous_spaces_begin, reinterpret_cast<uintptr_t>(space->Begin()));
- continuous_spaces_limit =
- std::max(continuous_spaces_limit, reinterpret_cast<uintptr_t>(space->Limit()));
}
} else {
if (space == region_space_) {
@@ -339,18 +356,10 @@ void ConcurrentCopying::BindBitmaps() {
}
}
}
- if (kEnableGenerationalConcurrentCopyingCollection) {
- if (young_gen_) {
- for (const auto& space : GetHeap()->GetDiscontinuousSpaces()) {
- CHECK(space->IsLargeObjectSpace());
- space->AsLargeObjectSpace()->CopyLiveToMarked();
- }
- } else {
- inter_region_bitmap_.reset(accounting::ContinuousSpaceBitmap::Create(
- "inter region ref bitmap",
- reinterpret_cast<uint8_t*>(continuous_spaces_begin),
- continuous_spaces_limit - continuous_spaces_begin));
- CHECK(inter_region_bitmap_ != nullptr) << "Couldn't allocate inter region ref bitmap";
+ if (kEnableGenerationalConcurrentCopyingCollection && young_gen_) {
+ for (const auto& space : GetHeap()->GetDiscontinuousSpaces()) {
+ CHECK(space->IsLargeObjectSpace());
+ space->AsLargeObjectSpace()->CopyLiveToMarked();
}
}
}
@@ -1112,12 +1121,22 @@ void ConcurrentCopying::AddLiveBytesAndScanRef(mirror::Object* ref) {
}
ComputeLiveBytesAndMarkRefFieldsVisitor</*kHandleInterRegionRefs*/ true>
visitor(this, obj_region_idx);
- ref->VisitReferences</*kVisitNativeRoots=*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+ ref->VisitReferences</*kVisitNativeRoots=*/ true, kDefaultVerifyFlags, kWithoutReadBarrier>(
visitor, visitor);
// Mark the corresponding card dirty if the object contains any
// inter-region reference.
if (visitor.ContainsInterRegionRefs()) {
- inter_region_bitmap_->Set(ref);
+ if (obj_region_idx == static_cast<size_t>(-1)) {
+ // If an inter-region ref has been found in a non-region-space, then it
+ // must be in the non-moving-space. This is because this function cannot
+ // be called on an immune-space object, and a large-object-space object
+ // has only a class object reference, which is either in some immune-space
+ // or in the non-moving-space.
+ DCHECK(heap_->non_moving_space_->HasAddress(ref));
+ non_moving_space_inter_region_bitmap_->Set(ref);
+ } else {
+ region_space_inter_region_bitmap_->Set(ref);
+ }
}
}
@@ -1427,11 +1446,15 @@ void ConcurrentCopying::CopyingPhase() {
}
}
ScanDirtyObject</*kNoUnEvac*/ true>(obj);
- } else if (space != region_space_ || region_space_->IsInUnevacFromSpace(obj)) {
+ } else if (space != region_space_) {
+ DCHECK(space == heap_->non_moving_space_);
// We need to process un-evac references, as they may be unprocessed
// if they skipped the marking phase due to heap mutation.
ScanDirtyObject</*kNoUnEvac*/ false>(obj);
- inter_region_bitmap_->Clear(obj);
+ non_moving_space_inter_region_bitmap_->Clear(obj);
+ } else if (region_space_->IsInUnevacFromSpace(obj)) {
+ ScanDirtyObject</*kNoUnEvac*/ false>(obj);
+ region_space_inter_region_bitmap_->Clear(obj);
}
},
accounting::CardTable::kCardAged);
@@ -1443,16 +1466,20 @@ void ConcurrentCopying::CopyingPhase() {
ScanDirtyObject</*kNoUnEvac*/ true>(obj);
};
if (space == region_space_) {
- region_space_->ScanUnevacFromSpace(inter_region_bitmap_.get(), visitor);
+ region_space_->ScanUnevacFromSpace(region_space_inter_region_bitmap_.get(), visitor);
} else {
- inter_region_bitmap_->VisitMarkedRange(reinterpret_cast<uintptr_t>(space->Begin()),
- reinterpret_cast<uintptr_t>(space->End()),
- visitor);
+ DCHECK(space == heap_->non_moving_space_);
+ non_moving_space_inter_region_bitmap_->VisitMarkedRange(
+ reinterpret_cast<uintptr_t>(space->Begin()),
+ reinterpret_cast<uintptr_t>(space->End()),
+ visitor);
}
}
}
// Done scanning unevac space.
done_scanning_.store(true, std::memory_order_release);
+ // NOTE: inter-region-ref bitmaps can be cleared here to release memory, if needed.
+ // Currently we do it in ReclaimPhase().
if (kVerboseMode) {
LOG(INFO) << "GC end of ScanCardsForSpace";
}
@@ -3527,7 +3554,8 @@ void ConcurrentCopying::FinishPhase() {
// We do not currently use the region space cards at all, madvise them away to save ram.
heap_->GetCardTable()->ClearCardRange(region_space_->Begin(), region_space_->Limit());
} else if (kEnableGenerationalConcurrentCopyingCollection && !young_gen_) {
- inter_region_bitmap_.reset();
+ region_space_inter_region_bitmap_->Clear();
+ non_moving_space_inter_region_bitmap_->Clear();
}
{
MutexLock mu(self, skipped_blocks_lock_);
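
The concurrent_copying.cc changes above replace the single inter_region_bitmap_, which spanned from the lowest space begin to the highest space limit (covering any unused gap in between), with one bitmap per space, created once at heap construction by CreateInterRegionRefBitmaps(). Marking then dispatches on where the object lives. A rough standalone sketch of that dispatch, using hypothetical stand-in types rather than ART's actual API:

#include <cstddef>
#include <cstdint>
#include <memory>

// Hypothetical stand-in for ART's accounting::ContinuousSpaceBitmap.
struct SpaceBitmap {
  uintptr_t begin;
  size_t size;
  void Set(const void* obj) { /* set the bit covering obj */ }
  bool HasAddress(const void* obj) const {
    uintptr_t p = reinterpret_cast<uintptr_t>(obj);
    return p >= begin && p < begin + size;
  }
};

struct InterRegionRefBitmaps {
  // One bitmap per space, so no dead gap between the two spaces is mapped.
  std::unique_ptr<SpaceBitmap> region_space;
  std::unique_ptr<SpaceBitmap> non_moving_space;

  void MarkInterRegionRef(const void* obj, bool obj_in_region_space) {
    if (obj_in_region_space) {
      region_space->Set(obj);
    } else {
      // Mirrors the new DCHECK: an object that is neither in the region
      // space nor immune/large-object must be in the non-moving space.
      non_moving_space->Set(obj);
    }
  }
};
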
diff --git a/runtime/gc/collector/concurrent_copying.h b/runtime/gc/collector/concurrent_copying.h
index aabfc8e154..a2d48376a5 100644
--- a/runtime/gc/collector/concurrent_copying.h
+++ b/runtime/gc/collector/concurrent_copying.h
@@ -98,6 +98,9 @@ class ConcurrentCopying : public GarbageCollector {
return kCollectorTypeCC;
}
void RevokeAllThreadLocalBuffers() override;
+ // Creates inter-region ref bitmaps for region-space and non-moving-space.
+ // Gets called in Heap construction after the two spaces are created.
+ void CreateInterRegionRefBitmaps();
void SetRegionSpace(space::RegionSpace* region_space) {
DCHECK(region_space != nullptr);
region_space_ = region_space;
@@ -391,7 +394,8 @@ class ConcurrentCopying : public GarbageCollector {
size_t gc_count_;
// Bit is set if the corresponding object has inter-region references that
// were found during the marking phase of a two-phase full-heap GC cycle.
- std::unique_ptr<accounting::ContinuousSpaceBitmap> inter_region_bitmap_;
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> region_space_inter_region_bitmap_;
+ std::unique_ptr<accounting::ContinuousSpaceBitmap> non_moving_space_inter_region_bitmap_;
// reclaimed_bytes_ratio = reclaimed_bytes/num_allocated_bytes per GC cycle
float reclaimed_bytes_ratio_sum_;
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index d868aba767..bf8aaaeba7 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -666,6 +666,9 @@ Heap::Heap(size_t initial_size,
concurrent_copying_collector_->SetRegionSpace(region_space_);
if (kEnableGenerationalConcurrentCopyingCollection) {
young_concurrent_copying_collector_->SetRegionSpace(region_space_);
+ // At this point, non-moving space should be created.
+ DCHECK(non_moving_space_ != nullptr);
+ concurrent_copying_collector_->CreateInterRegionRefBitmaps();
}
garbage_collectors_.push_back(concurrent_copying_collector_);
if (kEnableGenerationalConcurrentCopyingCollection) {
@@ -2736,7 +2739,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
// active_concurrent_copying_collector_. So we should not have concurrency here.
active_concurrent_copying_collector_ = (gc_type == collector::kGcTypeSticky) ?
young_concurrent_copying_collector_ : concurrent_copying_collector_;
- active_concurrent_copying_collector_->SetRegionSpace(region_space_);
+ DCHECK(active_concurrent_copying_collector_->RegionSpace() == region_space_);
}
collector = active_concurrent_copying_collector_;
break;
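
The heap.cc hunks wire the collectors up once, at Heap construction, and create the inter-region bitmaps there as well, once both spaces exist; CollectGarbageInternal() can then assert the wiring instead of repeating it. An illustrative sketch of the set-once/assert-later pattern, not ART's actual types:

#include <cassert>

class CollectorSketch {
 public:
  // Called exactly once while the owning heap is being constructed.
  void SetRegionSpace(void* space) {
    assert(space != nullptr);
    region_space_ = space;
  }
  void* RegionSpace() const { return region_space_; }
 private:
  void* region_space_ = nullptr;
};

void CollectGarbageExample(CollectorSketch& collector, void* region_space) {
  // Was: collector.SetRegionSpace(region_space) on every GC cycle.
  assert(collector.RegionSpace() == region_space);  // invariant check, not a re-set
  // ... run the collection ...
}
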
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index b7ac1e8fe3..9ce47490e7 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -240,11 +240,6 @@ static jlong ZygoteHooks_nativePreFork(JNIEnv* env, jclass) {
runtime->PreZygoteFork();
- if (Trace::GetMethodTracingMode() != TracingMode::kTracingInactive) {
- // Tracing active, pause it.
- Trace::Pause();
- }
-
// Grab the thread before fork potentially makes Thread::pthread_key_self_ unusable.
return reinterpret_cast<jlong>(ThreadForEnv(env));
}
diff --git a/runtime/thread.cc b/runtime/thread.cc
index 8bec2d9673..f459f9c8fb 100644
--- a/runtime/thread.cc
+++ b/runtime/thread.cc
@@ -160,6 +160,7 @@ void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
}
void Thread::InitTlsEntryPoints() {
+ ScopedTrace trace("InitTlsEntryPoints");
// Insert a placeholder so we can easily tell if we call an unimplemented entry point.
uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints);
uintptr_t* end = reinterpret_cast<uintptr_t*>(
@@ -903,6 +904,8 @@ bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_en
tlsPtr_.pthread_self = pthread_self();
CHECK(is_started_);
+ ScopedTrace trace("Thread::Init");
+
SetUpAlternateSignalStack();
if (!InitStackHwm()) {
return false;
@@ -912,7 +915,10 @@ bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_en
RemoveSuspendTrigger();
InitCardTable();
InitTid();
- interpreter::InitInterpreterTls(this);
+ {
+ ScopedTrace trace2("InitInterpreterTls");
+ interpreter::InitInterpreterTls(this);
+ }
#ifdef ART_TARGET_ANDROID
__get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
@@ -936,6 +942,7 @@ bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_en
}
}
+ ScopedTrace trace3("ThreadList::Register");
thread_list->Register(this);
return true;
}
@@ -943,6 +950,7 @@ bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_en
template <typename PeerAction>
Thread* Thread::Attach(const char* thread_name, bool as_daemon, PeerAction peer_action) {
Runtime* runtime = Runtime::Current();
+ ScopedTrace trace("Thread::Attach");
if (runtime == nullptr) {
LOG(ERROR) << "Thread attaching to non-existent runtime: " <<
((thread_name != nullptr) ? thread_name : "(Unnamed)");
@@ -950,6 +958,7 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, PeerAction peer_
}
Thread* self;
{
+ ScopedTrace trace2("Thread birth");
MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
if (runtime->IsShuttingDownLocked()) {
LOG(WARNING) << "Thread attaching while runtime is shutting down: " <<
@@ -1251,6 +1260,7 @@ static void GetThreadStack(pthread_t thread,
}
bool Thread::InitStackHwm() {
+ ScopedTrace trace("InitStackHwm");
void* read_stack_base;
size_t read_stack_size;
size_t read_guard_size;
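
The thread.cc hunks above only add trace markers so the individual phases of thread attach and init show up in a systrace capture. ART's ScopedTrace is an RAII type: its constructor opens a named trace section and its destructor closes it when the scope ends. A minimal stand-in that logs instead of emitting real atrace events:

#include <iostream>

// Minimal stand-in for ART's ScopedTrace; the real class emits ATRACE
// begin/end events, this one just logs to show the RAII shape.
class ScopedTraceSketch {
 public:
  explicit ScopedTraceSketch(const char* name) : name_(name) {
    std::cerr << "begin: " << name_ << '\n';
  }
  ~ScopedTraceSketch() {
    std::cerr << "end: " << name_ << '\n';
  }
 private:
  const char* name_;  // assumed to be a string literal, as in the callers above
};

void InitStackHwmExample() {
  ScopedTraceSketch trace("InitStackHwm");  // the span covers the whole function
  // ... stack high-water-mark setup ...
}  // destructor runs here, closing the span
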
diff --git a/runtime/trace.cc b/runtime/trace.cc
index f6c36cf989..ce955d8991 100644
--- a/runtime/trace.cc
+++ b/runtime/trace.cc
@@ -517,106 +517,6 @@ void Trace::Shutdown() {
}
}
-void Trace::Pause() {
- bool stop_alloc_counting = false;
- Runtime* runtime = Runtime::Current();
- Trace* the_trace = nullptr;
-
- Thread* const self = Thread::Current();
- pthread_t sampling_pthread = 0U;
- {
- MutexLock mu(self, *Locks::trace_lock_);
- if (the_trace_ == nullptr) {
- LOG(ERROR) << "Trace pause requested, but no trace currently running";
- return;
- } else {
- the_trace = the_trace_;
- sampling_pthread = sampling_pthread_;
- }
- }
-
- if (sampling_pthread != 0U) {
- {
- MutexLock mu(self, *Locks::trace_lock_);
- the_trace_ = nullptr;
- }
- CHECK_PTHREAD_CALL(pthread_join, (sampling_pthread, nullptr), "sampling thread shutdown");
- sampling_pthread_ = 0U;
- {
- MutexLock mu(self, *Locks::trace_lock_);
- the_trace_ = the_trace;
- }
- }
-
- if (the_trace != nullptr) {
- gc::ScopedGCCriticalSection gcs(self,
- gc::kGcCauseInstrumentation,
- gc::kCollectorTypeInstrumentation);
- ScopedSuspendAll ssa(__FUNCTION__);
- stop_alloc_counting = (the_trace->flags_ & Trace::kTraceCountAllocs) != 0;
-
- if (the_trace->trace_mode_ == TraceMode::kSampling) {
- MutexLock mu(self, *Locks::thread_list_lock_);
- runtime->GetThreadList()->ForEach(ClearThreadStackTraceAndClockBase, nullptr);
- } else {
- runtime->GetInstrumentation()->DisableMethodTracing(kTracerInstrumentationKey);
- runtime->GetInstrumentation()->RemoveListener(
- the_trace,
- instrumentation::Instrumentation::kMethodEntered |
- instrumentation::Instrumentation::kMethodExited |
- instrumentation::Instrumentation::kMethodUnwind);
- }
- }
-
- if (stop_alloc_counting) {
- // Can be racy since SetStatsEnabled is not guarded by any locks.
- Runtime::Current()->SetStatsEnabled(false);
- }
-}
-
-void Trace::Resume() {
- Thread* self = Thread::Current();
- Trace* the_trace;
- {
- MutexLock mu(self, *Locks::trace_lock_);
- if (the_trace_ == nullptr) {
- LOG(ERROR) << "No trace to resume (or sampling mode), ignoring this request";
- return;
- }
- the_trace = the_trace_;
- }
-
- Runtime* runtime = Runtime::Current();
-
- // Enable count of allocs if specified in the flags.
- bool enable_stats = (the_trace->flags_ & kTraceCountAllocs) != 0;
-
- {
- gc::ScopedGCCriticalSection gcs(self,
- gc::kGcCauseInstrumentation,
- gc::kCollectorTypeInstrumentation);
- ScopedSuspendAll ssa(__FUNCTION__);
-
- // Reenable.
- if (the_trace->trace_mode_ == TraceMode::kSampling) {
- CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, nullptr, &RunSamplingThread,
- reinterpret_cast<void*>(the_trace->interval_us_)), "Sampling profiler thread");
- } else {
- runtime->GetInstrumentation()->AddListener(the_trace,
- instrumentation::Instrumentation::kMethodEntered |
- instrumentation::Instrumentation::kMethodExited |
- instrumentation::Instrumentation::kMethodUnwind);
- // TODO: In full-PIC mode, we don't need to fully deopt.
- runtime->GetInstrumentation()->EnableMethodTracing(kTracerInstrumentationKey);
- }
- }
-
- // Can't call this when holding the mutator lock.
- if (enable_stats) {
- runtime->SetStatsEnabled(true);
- }
-}
-
TracingMode Trace::GetMethodTracingMode() {
MutexLock mu(Thread::Current(), *Locks::trace_lock_);
if (the_trace_ == nullptr) {
diff --git a/runtime/trace.h b/runtime/trace.h
index 108996231d..582f756ca9 100644
--- a/runtime/trace.h
+++ b/runtime/trace.h
@@ -156,9 +156,6 @@ class Trace final : public instrumentation::InstrumentationListener {
REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_,
!Locks::trace_lock_);
- static void Pause() REQUIRES(!Locks::trace_lock_, !Locks::thread_list_lock_);
- static void Resume() REQUIRES(!Locks::trace_lock_);
-
// Stop tracing. This will finish the trace and write it to file/send it via DDMS.
static void Stop()
REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::trace_lock_);
diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt
index c66f71d4d2..4dc5ced8c6 100644
--- a/tools/libcore_failures.txt
+++ b/tools/libcore_failures.txt
@@ -198,5 +198,29 @@
modes: [device],
bug: 116446372,
names: ["libcore.libcore.io.FdsanTest#testSocket"]
+},
+{
+ description: "Unexpected failures",
+ result: EXEC_FAILED,
+ bug: 121230364,
+ names: [
+ "libcore.libcore.net.InetAddressUtilsTest#parseNumericAddress[8]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[10]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[11]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[12]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[5]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[6]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[7]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[8]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_isNotNumericAddress[9]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[10]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[11]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[12]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[5]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[6]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[7]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[8]",
+ "libcore.libcore.net.InetAddressUtilsTest#test_parseNonNumericAddress[9]"
+ ]
}
]
diff --git a/tools/timeout_dumper/timeout_dumper.cc b/tools/timeout_dumper/timeout_dumper.cc
index 96d165c5c7..e04aefb26c 100644
--- a/tools/timeout_dumper/timeout_dumper.cc
+++ b/tools/timeout_dumper/timeout_dumper.cc
@@ -29,6 +29,7 @@
#include <thread>
#include <memory>
#include <set>
+#include <string>
#include <android-base/file.h>
#include <android-base/logging.h>
@@ -103,9 +104,22 @@ std::unique_ptr<std::string> FindAddr2line() {
}
}
- std::string path = std::string(".") + kAddr2linePath;
- if (access(path.c_str(), X_OK) == 0) {
- return std::make_unique<std::string>(path);
+ {
+ std::string path = std::string(".") + kAddr2linePath;
+ if (access(path.c_str(), X_OK) == 0) {
+ return std::make_unique<std::string>(path);
+ }
+ }
+
+ {
+ using android::base::Dirname;
+
+ std::string exec_dir = android::base::GetExecutableDirectory();
+ std::string derived_top = Dirname(Dirname(Dirname(Dirname(exec_dir))));
+ std::string path = derived_top + kAddr2linePath;
+ if (access(path.c_str(), X_OK) == 0) {
+ return std::make_unique<std::string>(path);
+ }
}
constexpr const char* kHostAddr2line = "/usr/bin/addr2line";
@@ -356,6 +370,91 @@ std::set<pid_t> PtraceSiblings(pid_t pid) {
return ret;
}
+void DumpABI(pid_t forked_pid) {
+ enum class ABI { kArm, kArm64, kMips, kMips64, kX86, kX86_64 };
+#if defined(__arm__)
+ constexpr ABI kDumperABI = ABI::kArm;
+#elif defined(__aarch64__)
+ constexpr ABI kDumperABI = ABI::kArm64;
+#elif defined(__mips__) && !defined(__LP64__)
+ constexpr ABI kDumperABI = ABI::kMips;
+#elif defined(__mips__) && defined(__LP64__)
+ constexpr ABI kDumperABI = ABI::kMips64;
+#elif defined(__i386__)
+ constexpr ABI kDumperABI = ABI::kX86;
+#elif defined(__x86_64__)
+ constexpr ABI kDumperABI = ABI::kX86_64;
+#else
+#error Unsupported architecture
+#endif
+
+ char data[1024]; // Should be more than enough.
+ struct iovec io_vec;
+ io_vec.iov_base = &data;
+ io_vec.iov_len = 1024;
+ ABI to_print;
+ if (0 != ::ptrace(PTRACE_GETREGSET, forked_pid, /* NT_PRSTATUS */ 1, &io_vec)) {
+ LOG(ERROR) << "Could not get registers to determine abi.";
+ // Use 64-bit as default.
+ switch (kDumperABI) {
+ case ABI::kArm:
+ case ABI::kArm64:
+ to_print = ABI::kArm64;
+ break;
+ case ABI::kMips:
+ case ABI::kMips64:
+ to_print = ABI::kMips64;
+ break;
+ case ABI::kX86:
+ case ABI::kX86_64:
+ to_print = ABI::kX86_64;
+ break;
+ default:
+ __builtin_unreachable();
+ }
+ } else {
+ // Check the length of the data. Assume that it's the same arch as the tool.
+ switch (kDumperABI) {
+ case ABI::kArm:
+ case ABI::kArm64:
+ to_print = io_vec.iov_len == 18 * sizeof(uint32_t) ? ABI::kArm : ABI::kArm64;
+ break;
+ case ABI::kMips:
+ case ABI::kMips64:
+ to_print = ABI::kMips64; // TODO Figure out how this should work.
+ break;
+ case ABI::kX86:
+ case ABI::kX86_64:
+ to_print = io_vec.iov_len == 17 * sizeof(uint32_t) ? ABI::kX86 : ABI::kX86_64;
+ break;
+ default:
+ __builtin_unreachable();
+ }
+ }
+ std::string abi_str;
+ switch (to_print) {
+ case ABI::kArm:
+ abi_str = "arm";
+ break;
+ case ABI::kArm64:
+ abi_str = "arm64";
+ break;
+ case ABI::kMips:
+ abi_str = "mips";
+ break;
+ case ABI::kMips64:
+ abi_str = "mips64";
+ break;
+ case ABI::kX86:
+ abi_str = "x86";
+ break;
+ case ABI::kX86_64:
+ abi_str = "x86_64";
+ break;
+ }
+ std::cerr << "ABI: '" << abi_str << "'" << std::endl;
+}
+
} // namespace ptrace
template <typename T>
@@ -495,19 +594,22 @@ void DumpThread(pid_t pid,
}
void DumpProcess(pid_t forked_pid, const std::atomic<bool>& saw_wif_stopped_for_main) {
+ LOG(ERROR) << "Timeout for process " << forked_pid;
+
CHECK_EQ(0, ::ptrace(PTRACE_ATTACH, forked_pid, 0, 0));
std::set<pid_t> tids = ptrace::PtraceSiblings(forked_pid);
tids.insert(forked_pid);
+ ptrace::DumpABI(forked_pid);
+
// Check whether we have and should use addr2line.
- std::unique_ptr<std::string> addr2line_path = addr2line::FindAddr2line();
- if (addr2line_path != nullptr) {
- LOG(ERROR) << "Found addr2line at " << *addr2line_path;
- } else {
- LOG(ERROR) << "Did not find usable addr2line";
+ std::unique_ptr<std::string> addr2line_path;
+ if (kUseAddr2line) {
+ addr2line_path = addr2line::FindAddr2line();
+ if (addr2line_path == nullptr) {
+ LOG(ERROR) << "Did not find usable addr2line";
+ }
}
- bool use_addr2line = kUseAddr2line && addr2line_path != nullptr;
- LOG(ERROR) << (use_addr2line ? "U" : "Not u") << "sing addr2line";
if (!WaitForMainSigStop(saw_wif_stopped_for_main)) {
LOG(ERROR) << "Did not receive SIGSTOP for pid " << forked_pid;
@@ -520,11 +622,7 @@ void DumpProcess(pid_t forked_pid, const std::atomic<bool>& saw_wif_stopped_for_
}
for (pid_t tid : tids) {
- DumpThread(forked_pid,
- tid,
- use_addr2line ? addr2line_path.get() : nullptr,
- " ",
- backtrace_map.get());
+ DumpThread(forked_pid, tid, addr2line_path.get(), " ", backtrace_map.get());
}
}
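
A note on the new DumpABI() above: it distinguishes 32-bit from 64-bit tracees by the size the kernel reports back for the NT_PRSTATUS regset. 32-bit ARM exposes 18 32-bit registers (r0-r15, cpsr, orig_r0) and 32-bit x86 exposes 17 (the i386 user_regs_struct fields), which is exactly what the two iov_len comparisons test. A standalone sketch of the trick, assuming a Linux host and a tracee that is already ptrace-attached and stopped:

#include <cstdint>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

// The kernel shrinks iov_len to the tracee's actual NT_PRSTATUS size, so
// the returned length reveals whether the tracee runs the 32- or 64-bit ABI.
const char* GuessAbiFromRegsetSize(pid_t tid) {
  uint64_t buf[128];  // comfortably larger than any supported regset
  struct iovec iov;
  iov.iov_base = buf;
  iov.iov_len = sizeof(buf);
  if (ptrace(PTRACE_GETREGSET, tid, /* NT_PRSTATUS */ 1, &iov) != 0) {
    return "unknown";  // DumpABI falls back to the 64-bit flavor in this case
  }
#if defined(__arm__) || defined(__aarch64__)
  return iov.iov_len == 18 * sizeof(uint32_t) ? "arm" : "arm64";
#elif defined(__i386__) || defined(__x86_64__)
  return iov.iov_len == 17 * sizeof(uint32_t) ? "x86" : "x86_64";
#else
  return "unknown";
#endif
}
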