121 files changed, 2450 insertions(+), 1390 deletions(-)
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index bd3a145368..127833233a 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -1555,7 +1555,7 @@ class ParallelCompilationManager {
     self->AssertNoPendingException();
     CHECK_GT(work_units, 0U);
 
-    index_.StoreRelaxed(begin);
+    index_.store(begin, std::memory_order_relaxed);
     for (size_t i = 0; i < work_units; ++i) {
       thread_pool_->AddTask(self, new ForAllClosureLambda<Fn>(this, end, fn));
     }
@@ -1573,7 +1573,7 @@ class ParallelCompilationManager {
   }
 
   size_t NextIndex() {
-    return index_.FetchAndAddSequentiallyConsistent(1);
+    return index_.fetch_add(1, std::memory_order_seq_cst);
   }
 
  private:
@@ -2317,6 +2317,7 @@ class InitializeClassVisitor : public CompilationVisitor {
           // The boot image case doesn't need to recursively initialize the dependencies with
           // special logic since the class linker already does this.
           can_init_static_fields =
+              ClassLinker::kAppImageMayContainStrings &&
               !soa.Self()->IsExceptionPending() &&
               is_superclass_initialized &&
               NoClinitInDependency(klass, soa.Self(), &class_loader);
@@ -2837,7 +2838,8 @@ void CompilerDriver::AddCompiledMethod(const MethodReference& method_ref,
                                                            /*expected*/ nullptr,
                                                            compiled_method);
   CHECK(result == MethodTable::kInsertResultSuccess);
-  non_relative_linker_patch_count_.FetchAndAddRelaxed(non_relative_linker_patch_count);
+  non_relative_linker_patch_count_.fetch_add(non_relative_linker_patch_count,
+                                             std::memory_order_relaxed);
   DCHECK(GetCompiledMethod(method_ref) != nullptr) << method_ref.PrettyMethod();
 }
 
@@ -2948,7 +2950,7 @@ bool CompilerDriver::IsMethodVerifiedWithoutFailures(uint32_t method_idx,
 }
 
 size_t CompilerDriver::GetNonRelativeLinkerPatchCount() const {
-  return non_relative_linker_patch_count_.LoadRelaxed();
+  return non_relative_linker_patch_count_.load(std::memory_order_relaxed);
 }
 
 void CompilerDriver::SetRequiresConstructorBarrier(Thread* self,
diff --git a/compiler/optimizing/code_sinking.cc b/compiler/optimizing/code_sinking.cc
index f4760d661f..2e31d35584 100644
--- a/compiler/optimizing/code_sinking.cc
+++ b/compiler/optimizing/code_sinking.cc
@@ -214,6 +214,11 @@ static HInstruction* FindIdealPosition(HInstruction* instruction,
     DCHECK(target_block != nullptr);
   }
 
+  // Bail if the instruction can throw and we are about to move into a catch block.
+  if (instruction->CanThrow() && target_block->GetTryCatchInformation() != nullptr) {
+    return nullptr;
+  }
+
   // Find insertion position. No need to filter anymore, as we have found a
   // target block.
   HInstruction* insert_pos = nullptr;
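The compiler_driver.cc hunks at the top of this diff are part of a wider migration from ART's Atomic<T> wrapper methods to the underlying std::atomic calls, spelling the memory order out at each site. A minimal standalone sketch of the same work-distribution index, assuming nothing beyond std::atomic (WorkIndex is an illustrative name, not ART's class):

    #include <atomic>
    #include <cstddef>

    // Illustrative stand-in for ParallelCompilationManager's index_ member.
    class WorkIndex {
     public:
      // Was: index_.StoreRelaxed(begin);
      void Reset(size_t begin) { index_.store(begin, std::memory_order_relaxed); }

      // Was: index_.FetchAndAddSequentiallyConsistent(1);
      // fetch_add returns the old value, so each worker claims a distinct index.
      size_t Next() { return index_.fetch_add(1, std::memory_order_seq_cst); }

     private:
      std::atomic<size_t> index_{0};
    };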
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index 2b6f90540f..0b2297d157 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -852,7 +852,7 @@ void InstructionSimplifierVisitor::VisitBooleanNot(HBooleanNot* bool_not) {
 static HInstruction* NewIntegralAbs(ArenaAllocator* allocator,
                                     HInstruction* x,
                                     HInstruction* cursor) {
-  DataType::Type type = x->GetType();
+  DataType::Type type = DataType::Kind(x->GetType());
   DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
   HAbs* abs = new (allocator) HAbs(type, x, cursor->GetDexPc());
   cursor->GetBlock()->InsertInstructionBefore(abs, cursor);
@@ -865,7 +865,7 @@ static HInstruction* NewIntegralMinMax(ArenaAllocator* allocator,
                                        HInstruction* y,
                                        HInstruction* cursor,
                                        bool is_min) {
-  DataType::Type type = x->GetType();
+  DataType::Type type = DataType::Kind(x->GetType());
   DCHECK(type == DataType::Type::kInt32 || type == DataType::Type::kInt64);
   HBinaryOperation* minmax = nullptr;
   if (is_min) {
@@ -939,9 +939,9 @@ void InstructionSimplifierVisitor::VisitSelect(HSelect* select) {
     DataType::Type t_type = true_value->GetType();
     DataType::Type f_type = false_value->GetType();
     // Here we have a <cmp> b ? true_value : false_value.
-    // Test if both values are same-typed int or long.
-    if (t_type == f_type &&
-        (t_type == DataType::Type::kInt32 || t_type == DataType::Type::kInt64)) {
+    // Test if both values are compatible integral types (resulting
+    // MIN/MAX/ABS type will be int or long, like the condition).
+    if (DataType::IsIntegralType(t_type) && DataType::Kind(t_type) == DataType::Kind(f_type)) {
       // Try to replace typical integral MIN/MAX/ABS constructs.
       if ((cmp == kCondLT || cmp == kCondLE || cmp == kCondGT || cmp == kCondGE) &&
           ((a == true_value && b == false_value) ||
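The switch from x->GetType() to DataType::Kind(x->GetType()) matters because sub-word integral values (int8/uint16/int16) are computed as int32 in the compiler; comparing kinds rather than raw types lets the MIN/MAX/ABS recognition fire for mixed but compatible operands. A simplified sketch of the idea, where the enum and Kind() are stand-ins for ART's DataType:

    enum class Type { kInt8, kUint16, kInt16, kInt32, kInt64 };

    // Sub-word integral values are widened to int32 during computation.
    constexpr Type Kind(Type t) {
      return (t == Type::kInt8 || t == Type::kUint16 || t == Type::kInt16)
          ? Type::kInt32
          : t;
    }

    // An int16 operand is now "kind-compatible" with an int32 operand...
    static_assert(Kind(Type::kInt16) == Kind(Type::kInt32), "sub-word widens to int");
    // ...while int and long remain distinct kinds.
    static_assert(Kind(Type::kInt32) != Kind(Type::kInt64), "int and long differ");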
diff --git a/compiler/utils/atomic_dex_ref_map-inl.h b/compiler/utils/atomic_dex_ref_map-inl.h
index 7977e8201f..4bd323dadb 100644
--- a/compiler/utils/atomic_dex_ref_map-inl.h
+++ b/compiler/utils/atomic_dex_ref_map-inl.h
@@ -70,7 +70,7 @@ inline bool AtomicDexRefMap<DexFileReferenceType, Value>::Get(const DexFileRefer
   if (array == nullptr) {
     return false;
   }
-  *out = (*array)[ref.index].LoadRelaxed();
+  *out = (*array)[ref.index].load(std::memory_order_relaxed);
   return true;
 }
 
@@ -81,8 +81,8 @@ inline bool AtomicDexRefMap<DexFileReferenceType, Value>::Remove(const DexFileRe
   if (array == nullptr) {
     return false;
   }
-  *out = (*array)[ref.index].LoadRelaxed();
-  (*array)[ref.index].StoreSequentiallyConsistent(nullptr);
+  *out = (*array)[ref.index].load(std::memory_order_relaxed);
+  (*array)[ref.index].store(nullptr, std::memory_order_seq_cst);
   return true;
 }
 
@@ -121,7 +121,7 @@ inline void AtomicDexRefMap<DexFileReferenceType, Value>::Visit(const Visitor& v
     const DexFile* dex_file = pair.first;
     const ElementArray& elements = pair.second;
     for (size_t i = 0; i < elements.size(); ++i) {
-      visitor(DexFileReference(dex_file, i), elements[i].LoadRelaxed());
+      visitor(DexFileReference(dex_file, i), elements[i].load(std::memory_order_relaxed));
     }
   }
 }
@@ -130,7 +130,7 @@ template <typename DexFileReferenceType, typename Value>
 inline void AtomicDexRefMap<DexFileReferenceType, Value>::ClearEntries() {
   for (auto& it : arrays_) {
     for (auto& element : it.second) {
-      element.StoreRelaxed(nullptr);
+      element.store(nullptr, std::memory_order_relaxed);
     }
   }
 }
diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc
index 9b370178f7..6950b93e51 100644
--- a/dex2oat/dex2oat.cc
+++ b/dex2oat/dex2oat.cc
@@ -1609,11 +1609,9 @@ class Dex2Oat FINAL {
       // Unzip or copy dex files straight to the oat file.
       std::vector<std::unique_ptr<MemMap>> opened_dex_files_map;
       std::vector<std::unique_ptr<const DexFile>> opened_dex_files;
-      // No need to verify the dex file for:
-      // 1) Dexlayout since it does the verification. It also may not pass the verification since
-      //    we don't update the dex checksum.
-      // 2) when we have a vdex file, which means it was already verified.
-      const bool verify = !DoDexLayoutOptimizations() && (input_vdex_file_ == nullptr);
+      // No need to verify the dex file when we have a vdex file, which means it was already
+      // verified.
+      const bool verify = (input_vdex_file_ == nullptr);
       if (!oat_writers_[i]->WriteAndOpenDexFiles(
           vdex_files_[i].get(),
           rodata_.back(),
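For reference, the access pattern AtomicDexRefMap ends up with after this change, distilled to plain std::atomic slots (the vector stands in for ART's per-dex-file element arrays; CompiledUnit is a hypothetical value type):

    #include <atomic>
    #include <vector>

    struct CompiledUnit {};  // stand-in for the mapped Value type

    // Read: a relaxed load is enough; callers synchronize externally.
    CompiledUnit* Get(const std::vector<std::atomic<CompiledUnit*>>& slots, size_t i) {
      return slots[i].load(std::memory_order_relaxed);
    }

    // Remove: relaxed read of the old value, seq_cst store of the tombstone,
    // mirroring the load/store pair in the hunk above.
    CompiledUnit* Remove(std::vector<std::atomic<CompiledUnit*>>& slots, size_t i) {
      CompiledUnit* old = slots[i].load(std::memory_order_relaxed);
      slots[i].store(nullptr, std::memory_order_seq_cst);
      return old;
    }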
diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc
index 5590c8b3ab..0cd39ac11b 100644
--- a/dex2oat/dex2oat_test.cc
+++ b/dex2oat/dex2oat_test.cc
@@ -2013,4 +2013,84 @@ TEST_F(Dex2oatTest, QuickenedInput) {
   ASSERT_EQ(vdex_unquickened->FlushCloseOrErase(), 0) << "Could not flush and close";
 }
 
+// Test that compact dex generation with invalid dex files doesn't crash dex2oat. b/75970654
+TEST_F(Dex2oatTest, CompactDexInvalidSource) {
+  ScratchFile invalid_dex;
+  {
+    FILE* file = fdopen(invalid_dex.GetFd(), "w+b");
+    ZipWriter writer(file);
+    writer.StartEntry("classes.dex", ZipWriter::kAlign32);
+    DexFile::Header header = {};
+    StandardDexFile::WriteMagic(header.magic_);
+    StandardDexFile::WriteCurrentVersion(header.magic_);
+    header.file_size_ = 4 * KB;
+    header.data_size_ = 4 * KB;
+    header.data_off_ = 10 * MB;
+    header.map_off_ = 10 * MB;
+    header.class_defs_off_ = 10 * MB;
+    header.class_defs_size_ = 10000;
+    ASSERT_GE(writer.WriteBytes(&header, sizeof(header)), 0);
+    writer.FinishEntry();
+    writer.Finish();
+    ASSERT_EQ(invalid_dex.GetFile()->Flush(), 0);
+  }
+  const std::string dex_location = invalid_dex.GetFilename();
+  const std::string odex_location = GetOdexDir() + "/output.odex";
+  std::string error_msg;
+  int status = GenerateOdexForTestWithStatus(
+      {dex_location},
+      odex_location,
+      CompilerFilter::kQuicken,
+      &error_msg,
+      { "--compact-dex-level=fast" });
+  ASSERT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) != 0) << status << " " << output_;
+}
+
+// Test that dex2oat with a CompactDex file in the APK fails.
+TEST_F(Dex2oatTest, CompactDexInZip) {
+  CompactDexFile::Header header = {};
+  CompactDexFile::WriteMagic(header.magic_);
+  CompactDexFile::WriteCurrentVersion(header.magic_);
+  header.file_size_ = sizeof(CompactDexFile::Header);
+  header.data_off_ = 10 * MB;
+  header.map_off_ = 10 * MB;
+  header.class_defs_off_ = 10 * MB;
+  header.class_defs_size_ = 10000;
+  // Create a zip containing the invalid dex.
+  ScratchFile invalid_dex_zip;
+  {
+    FILE* file = fdopen(invalid_dex_zip.GetFd(), "w+b");
+    ZipWriter writer(file);
+    writer.StartEntry("classes.dex", ZipWriter::kCompress);
+    ASSERT_GE(writer.WriteBytes(&header, sizeof(header)), 0);
+    writer.FinishEntry();
+    writer.Finish();
+    ASSERT_EQ(invalid_dex_zip.GetFile()->Flush(), 0);
+  }
+  // Create the dex file directly.
+  ScratchFile invalid_dex;
+  {
+    ASSERT_GE(invalid_dex.GetFile()->WriteFully(&header, sizeof(header)), 0);
+    ASSERT_EQ(invalid_dex.GetFile()->Flush(), 0);
+  }
+  std::string error_msg;
+  int status = 0u;
+
+  status = GenerateOdexForTestWithStatus(
+      { invalid_dex_zip.GetFilename() },
+      GetOdexDir() + "/output_apk.odex",
+      CompilerFilter::kQuicken,
+      &error_msg,
+      { "--compact-dex-level=fast" });
+  ASSERT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) != 0) << status << " " << output_;
+
+  status = GenerateOdexForTestWithStatus(
+      { invalid_dex.GetFilename() },
+      GetOdexDir() + "/output.odex",
+      CompilerFilter::kQuicken,
+      &error_msg,
+      { "--compact-dex-level=fast" });
+  ASSERT_TRUE(WIFEXITED(status) && WEXITSTATUS(status) != 0) << status << " " << output_;
+}
+
 }  // namespace art
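Both new tests assert the same thing about the child process: dex2oat must fail gracefully on malformed input rather than crash. A small sketch of that check in isolation (RunDex2oat is a hypothetical helper returning a waitpid()-style status, not a test-harness function):

    #include <sys/wait.h>
    #include <cassert>

    int RunDex2oat();  // hypothetical: fork/exec dex2oat and return its wait status

    void ExpectGracefulFailure() {
      int status = RunDex2oat();
      // WIFEXITED is false if the child died on a signal (i.e. crashed);
      // a nonzero exit code means it rejected the bad input cleanly.
      assert(WIFEXITED(status) && WEXITSTATUS(status) != 0);
    }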
diff --git a/libartbase/base/allocator.cc b/libartbase/base/allocator.cc
index a42414507b..17da789b36 100644
--- a/libartbase/base/allocator.cc
+++ b/libartbase/base/allocator.cc
@@ -83,9 +83,9 @@ void Dump(std::ostream& os) {
   if (kEnableTrackingAllocator) {
     os << "Dumping native memory usage\n";
     for (size_t i = 0; i < kAllocatorTagCount; ++i) {
-      uint64_t bytes_used = g_bytes_used[i].LoadRelaxed();
+      uint64_t bytes_used = g_bytes_used[i].load(std::memory_order_relaxed);
       uint64_t max_bytes_used = g_max_bytes_used[i];
-      uint64_t total_bytes_used = g_total_bytes_used[i].LoadRelaxed();
+      uint64_t total_bytes_used = g_total_bytes_used[i].load(std::memory_order_relaxed);
       if (total_bytes_used != 0) {
         os << static_cast<AllocatorTag>(i) << " active=" << bytes_used << " max="
            << max_bytes_used << " total=" << total_bytes_used << "\n";
diff --git a/libartbase/base/allocator.h b/libartbase/base/allocator.h
index d92fe193e6..7ddbacf716 100644
--- a/libartbase/base/allocator.h
+++ b/libartbase/base/allocator.h
@@ -84,15 +84,15 @@ extern Atomic<uint64_t> g_total_bytes_used[kAllocatorTagCount];
 void Dump(std::ostream& os);
 
 inline void RegisterAllocation(AllocatorTag tag, size_t bytes) {
-  g_total_bytes_used[tag].FetchAndAddSequentiallyConsistent(bytes);
-  size_t new_bytes = g_bytes_used[tag].FetchAndAddSequentiallyConsistent(bytes) + bytes;
+  g_total_bytes_used[tag].fetch_add(bytes, std::memory_order_seq_cst);
+  size_t new_bytes = g_bytes_used[tag].fetch_add(bytes, std::memory_order_seq_cst) + bytes;
   if (g_max_bytes_used[tag] < new_bytes) {
     g_max_bytes_used[tag] = new_bytes;
   }
 }
 
 inline void RegisterFree(AllocatorTag tag, size_t bytes) {
-  g_bytes_used[tag].FetchAndSubSequentiallyConsistent(bytes);
+  g_bytes_used[tag].fetch_sub(bytes, std::memory_order_seq_cst);
 }
 
 }  // namespace TrackedAllocators
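The allocator counters keep their original, intentionally best-effort high-water-mark update; only the spelling of the atomics changes. A self-contained version of the same logic for a single tag (the global names here are illustrative, not ART's per-tag arrays):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    std::atomic<uint64_t> g_bytes_used{0};
    std::atomic<uint64_t> g_total_bytes_used{0};
    uint64_t g_max_bytes_used = 0;  // plain variable: the max update may race

    inline void RegisterAllocation(size_t bytes) {
      g_total_bytes_used.fetch_add(bytes, std::memory_order_seq_cst);
      uint64_t new_bytes =
          g_bytes_used.fetch_add(bytes, std::memory_order_seq_cst) + bytes;
      if (g_max_bytes_used < new_bytes) {
        g_max_bytes_used = new_bytes;  // best-effort, matching the original code
      }
    }

    inline void RegisterFree(size_t bytes) {
      g_bytes_used.fetch_sub(bytes, std::memory_order_seq_cst);
    }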
diff --git a/libartbase/base/atomic.h b/libartbase/base/atomic.h
index fd34cc6143..f736667ca8 100644
--- a/libartbase/base/atomic.h
+++ b/libartbase/base/atomic.h
@@ -35,94 +35,28 @@ class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
   explicit Atomic<T>(T value) : std::atomic<T>(value) { }
 
-  // Load from memory without ordering or synchronization constraints.
-  T LoadRelaxed() const {
-    return this->load(std::memory_order_relaxed);
-  }
-
-  // Load from memory with acquire ordering.
-  T LoadAcquire() const {
-    return this->load(std::memory_order_acquire);
-  }
-
-  // Word tearing allowed, but may race.
-  // TODO: Optimize?
-  // There has been some discussion of eventually disallowing word
-  // tearing for Java data loads.
+  // Load data from an atomic variable with Java data memory order semantics.
+  //
+  // Promises memory access semantics of ordinary Java data.
+  // Does not order other memory accesses.
+  // Long and double accesses may be performed 32 bits at a time.
+  // There are no "cache coherence" guarantees; e.g. loads from the same location may be reordered.
+  // In contrast to normal C++ accesses, racing accesses are allowed.
   T LoadJavaData() const {
     return this->load(std::memory_order_relaxed);
   }
 
-  // Load from memory with a total ordering.
-  // Corresponds exactly to a Java volatile load.
-  T LoadSequentiallyConsistent() const {
-    return this->load(std::memory_order_seq_cst);
-  }
-
-  // Store to memory without ordering or synchronization constraints.
-  void StoreRelaxed(T desired_value) {
-    this->store(desired_value, std::memory_order_relaxed);
-  }
-
-  // Word tearing allowed, but may race.
+  // Store data in an atomic variable with Java data memory ordering semantics.
+  //
+  // Promises memory access semantics of ordinary Java data.
+  // Does not order other memory accesses.
+  // Long and double accesses may be performed 32 bits at a time.
+  // There are no "cache coherence" guarantees; e.g. loads from the same location may be reordered.
+  // In contrast to normal C++ accesses, racing accesses are allowed.
   void StoreJavaData(T desired_value) {
     this->store(desired_value, std::memory_order_relaxed);
   }
 
-  // Store to memory with release ordering.
-  void StoreRelease(T desired_value) {
-    this->store(desired_value, std::memory_order_release);
-  }
-
-  // Store to memory with a total ordering.
-  void StoreSequentiallyConsistent(T desired_value) {
-    this->store(desired_value, std::memory_order_seq_cst);
-  }
-
-  // Atomically replace the value with desired_value.
-  T ExchangeRelaxed(T desired_value) {
-    return this->exchange(desired_value, std::memory_order_relaxed);
-  }
-
-  // Atomically replace the value with desired_value.
-  T ExchangeSequentiallyConsistent(T desired_value) {
-    return this->exchange(desired_value, std::memory_order_seq_cst);
-  }
-
-  // Atomically replace the value with desired_value.
-  T ExchangeAcquire(T desired_value) {
-    return this->exchange(desired_value, std::memory_order_acquire);
-  }
-
-  // Atomically replace the value with desired_value.
-  T ExchangeRelease(T desired_value) {
-    return this->exchange(desired_value, std::memory_order_release);
-  }
-
-  // Atomically replace the value with desired_value if it matches the expected_value.
-  // Participates in total ordering of atomic operations. Returns true on success, false otherwise.
-  // If the value does not match, updates the expected_value argument with the value that was
-  // atomically read for the failed comparison.
-  bool CompareAndExchangeStrongSequentiallyConsistent(T* expected_value, T desired_value) {
-    return this->compare_exchange_strong(*expected_value, desired_value, std::memory_order_seq_cst);
-  }
-
-  // Atomically replace the value with desired_value if it matches the expected_value.
-  // Participates in total ordering of atomic operations. Returns true on success, false otherwise.
-  // If the value does not match, updates the expected_value argument with the value that was
-  // atomically read for the failed comparison.
-  bool CompareAndExchangeStrongAcquire(T* expected_value, T desired_value) {
-    return this->compare_exchange_strong(*expected_value, desired_value, std::memory_order_acquire);
-  }
-
-  // Atomically replace the value with desired_value if it matches the expected_value.
-  // Participates in total ordering of atomic operations. Returns true on success, false otherwise.
-  // If the value does not match, updates the expected_value argument with the value that was
-  // atomically read for the failed comparison.
-  bool CompareAndExchangeStrongRelease(T* expected_value, T desired_value) {
-    return this->compare_exchange_strong(*expected_value, desired_value, std::memory_order_release);
-  }
-
   // Atomically replace the value with desired_value if it matches the expected_value.
   // Participates in total ordering of atomic operations.
   bool CompareAndSetStrongSequentiallyConsistent(T expected_value, T desired_value) {
@@ -166,66 +100,8 @@ class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
     return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_release);
   }
 
-  T FetchAndAddSequentiallyConsistent(const T value) {
-    return this->fetch_add(value, std::memory_order_seq_cst);  // Return old_value.
-  }
-
-  T FetchAndAddRelaxed(const T value) {
-    return this->fetch_add(value, std::memory_order_relaxed);  // Return old_value.
-  }
-
-  T FetchAndAddAcquire(const T value) {
-    return this->fetch_add(value, std::memory_order_acquire);  // Return old_value.
-  }
-
-  T FetchAndAddRelease(const T value) {
-    return this->fetch_add(value, std::memory_order_acquire);  // Return old_value.
-  }
-
-  T FetchAndSubSequentiallyConsistent(const T value) {
-    return this->fetch_sub(value, std::memory_order_seq_cst);  // Return old value.
-  }
-
-  T FetchAndSubRelaxed(const T value) {
-    return this->fetch_sub(value, std::memory_order_relaxed);  // Return old value.
-  }
-
-  T FetchAndBitwiseAndSequentiallyConsistent(const T value) {
-    return this->fetch_and(value, std::memory_order_seq_cst);  // Return old_value.
-  }
-
-  T FetchAndBitwiseAndAcquire(const T value) {
-    return this->fetch_and(value, std::memory_order_acquire);  // Return old_value.
-  }
-
-  T FetchAndBitwiseAndRelease(const T value) {
-    return this->fetch_and(value, std::memory_order_release);  // Return old_value.
-  }
-
-  T FetchAndBitwiseOrSequentiallyConsistent(const T value) {
-    return this->fetch_or(value, std::memory_order_seq_cst);  // Return old_value.
-  }
-
-  T FetchAndBitwiseOrAcquire(const T value) {
-    return this->fetch_or(value, std::memory_order_acquire);  // Return old_value.
-  }
-
-  T FetchAndBitwiseOrRelease(const T value) {
-    return this->fetch_or(value, std::memory_order_release);  // Return old_value.
-  }
-
-  T FetchAndBitwiseXorSequentiallyConsistent(const T value) {
-    return this->fetch_xor(value, std::memory_order_seq_cst);  // Return old_value.
-  }
-
-  T FetchAndBitwiseXorAcquire(const T value) {
-    return this->fetch_xor(value, std::memory_order_acquire);  // Return old_value.
-  }
-
-  T FetchAndBitwiseXorRelease(const T value) {
-    return this->fetch_xor(value, std::memory_order_release);  // Return old_value.
-  }
-
+  // Returns the address of the current atomic variable. This is only used by futex() which is
+  // declared to take a volatile address (see base/mutex-inl.h).
   volatile T* Address() {
     return reinterpret_cast<T*>(this);
   }
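Each removed wrapper above was a one-line alias for a std::atomic member function; note in passing that the deleted FetchAndAddRelease actually passed std::memory_order_acquire, a latent inconsistency that the move to explicit orders eliminates. A compact reference of the mapping, derived directly from the deleted bodies:

    #include <atomic>

    void MappingReference(std::atomic<unsigned>& a, unsigned v) {
      (void)a.load(std::memory_order_relaxed);          // was LoadRelaxed()
      (void)a.load(std::memory_order_acquire);          // was LoadAcquire()
      (void)a.load(std::memory_order_seq_cst);          // was LoadSequentiallyConsistent()
      a.store(v, std::memory_order_relaxed);            // was StoreRelaxed()
      a.store(v, std::memory_order_release);            // was StoreRelease()
      a.store(v, std::memory_order_seq_cst);            // was StoreSequentiallyConsistent()
      (void)a.exchange(v, std::memory_order_seq_cst);   // was ExchangeSequentiallyConsistent()
      (void)a.fetch_add(v, std::memory_order_seq_cst);  // was FetchAndAddSequentiallyConsistent()
      (void)a.fetch_sub(v, std::memory_order_relaxed);  // was FetchAndSubRelaxed()
      (void)a.fetch_and(v, std::memory_order_acquire);  // was FetchAndBitwiseAndAcquire()
      (void)a.fetch_or(v, std::memory_order_release);   // was FetchAndBitwiseOrRelease()
      (void)a.fetch_xor(v, std::memory_order_seq_cst);  // was FetchAndBitwiseXorSequentiallyConsistent()
    }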
diff --git a/libdexfile/dex/dex_file_loader.cc b/libdexfile/dex/dex_file_loader.cc
index 758a2f0599..1e0f5ac6ae 100644
--- a/libdexfile/dex/dex_file_loader.cc
+++ b/libdexfile/dex/dex_file_loader.cc
@@ -348,6 +348,8 @@ std::unique_ptr<DexFile> DexFileLoader::OpenCommon(const uint8_t* base,
                                   location_checksum,
                                   oat_dex_file,
                                   std::move(container)));
+    // Disable verification for CompactDex input.
+    verify = false;
   } else {
     *error_msg = "Invalid or truncated dex file";
   }
diff --git a/openjdkjvmti/events.cc b/openjdkjvmti/events.cc
index 07b1529adb..de678711fc 100644
--- a/openjdkjvmti/events.cc
+++ b/openjdkjvmti/events.cc
@@ -940,9 +940,6 @@ void EventHandler::SetupTraceListener(JvmtiMethodTraceListener* listener,
   }
   art::ScopedThreadStateChange stsc(art::Thread::Current(), art::ThreadState::kNative);
   art::instrumentation::Instrumentation* instr = art::Runtime::Current()->GetInstrumentation();
-  art::gc::ScopedGCCriticalSection gcs(art::Thread::Current(),
-                                       art::gc::kGcCauseInstrumentation,
-                                       art::gc::kCollectorTypeInstrumentation);
   art::ScopedSuspendAll ssa("jvmti method tracing installation");
   if (enable) {
     instr->AddListener(listener, new_events);
diff --git a/openjdkjvmti/ti_method.cc b/openjdkjvmti/ti_method.cc
index 83d64ef1d8..bf2e6cd104 100644
--- a/openjdkjvmti/ti_method.cc
+++ b/openjdkjvmti/ti_method.cc
@@ -42,6 +42,7 @@
 #include "dex/dex_file_types.h"
 #include "dex/modifiers.h"
 #include "events-inl.h"
+#include "gc_root-inl.h"
 #include "jit/jit.h"
 #include "jni_internal.h"
 #include "mirror/class-inl.h"
@@ -546,13 +547,12 @@ jvmtiError MethodUtil::IsMethodSynthetic(jvmtiEnv* env, jmethodID m, jboolean* i
 
 class CommonLocalVariableClosure : public art::Closure {
  public:
-  CommonLocalVariableClosure(art::Thread* caller,
-                             jint depth,
-                             jint slot)
-      : result_(ERR(INTERNAL)), caller_(caller), depth_(depth), slot_(slot) {}
+  CommonLocalVariableClosure(jint depth, jint slot)
+      : result_(ERR(INTERNAL)), depth_(depth), slot_(slot) {}
 
   void Run(art::Thread* self) OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
+    art::ScopedAssertNoThreadSuspension sants("CommonLocalVariableClosure::Run");
     std::unique_ptr<art::Context> context(art::Context::Create());
     FindFrameAtDepthVisitor visitor(self, context.get(), depth_);
     visitor.WalkStack();
@@ -597,17 +597,17 @@ class CommonLocalVariableClosure : public art::Closure {
     }
   }
 
-  jvmtiError GetResult() const {
+  virtual jvmtiError GetResult() {
     return result_;
   }
 
 protected:
   virtual jvmtiError Execute(art::ArtMethod* method, art::StackVisitor& visitor)
-      REQUIRES(art::Locks::mutator_lock_) = 0;
+      REQUIRES_SHARED(art::Locks::mutator_lock_) = 0;
   virtual jvmtiError GetTypeError(art::ArtMethod* method,
                                   art::Primitive::Type type,
                                   const std::string& descriptor)
-      REQUIRES(art::Locks::mutator_lock_) = 0;
+      REQUIRES_SHARED(art::Locks::mutator_lock_) = 0;
 
   jvmtiError GetSlotType(art::ArtMethod* method,
                          uint32_t dex_pc,
@@ -674,25 +674,35 @@ class CommonLocalVariableClosure : public art::Closure {
   }
 
   jvmtiError result_;
-  art::Thread* caller_;
   jint depth_;
   jint slot_;
 };
 
 class GetLocalVariableClosure : public CommonLocalVariableClosure {
 public:
-  GetLocalVariableClosure(art::Thread* caller,
-                          jint depth,
+  GetLocalVariableClosure(jint depth,
                           jint slot,
                           art::Primitive::Type type,
                           jvalue* val)
-      : CommonLocalVariableClosure(caller, depth, slot), type_(type), val_(val) {}
+      : CommonLocalVariableClosure(depth, slot),
+        type_(type),
+        val_(val),
+        obj_val_(nullptr) {}
+
+  virtual jvmtiError GetResult() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    if (result_ == OK && type_ == art::Primitive::kPrimNot) {
+      val_->l = obj_val_.IsNull()
+          ? nullptr
+          : art::Thread::Current()->GetJniEnv()->AddLocalReference<jobject>(obj_val_.Read());
+    }
+    return CommonLocalVariableClosure::GetResult();
+  }
 
 protected:
   jvmtiError GetTypeError(art::ArtMethod* method ATTRIBUTE_UNUSED,
                           art::Primitive::Type slot_type,
                           const std::string& descriptor ATTRIBUTE_UNUSED)
-      OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (slot_type) {
       case art::Primitive::kPrimByte:
       case art::Primitive::kPrimChar:
@@ -712,7 +722,7 @@ class GetLocalVariableClosure : public CommonLocalVariableClosure {
   }
 
   jvmtiError Execute(art::ArtMethod* method, art::StackVisitor& visitor)
-      OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (type_) {
       case art::Primitive::kPrimNot: {
         uint32_t ptr_val;
@@ -722,8 +732,8 @@ class GetLocalVariableClosure : public CommonLocalVariableClosure {
                                  &ptr_val)) {
           return ERR(OPAQUE_FRAME);
         }
-        art::ObjPtr<art::mirror::Object> obj(reinterpret_cast<art::mirror::Object*>(ptr_val));
-        val_->l = obj.IsNull() ? nullptr : caller_->GetJniEnv()->AddLocalReference<jobject>(obj);
+        obj_val_ = art::GcRoot<art::mirror::Object>(
+            reinterpret_cast<art::mirror::Object*>(ptr_val));
         break;
       }
       case art::Primitive::kPrimInt:
@@ -760,6 +770,7 @@ class GetLocalVariableClosure : public CommonLocalVariableClosure {
 private:
   art::Primitive::Type type_;
   jvalue* val_;
+  art::GcRoot<art::mirror::Object> obj_val_;
 };
 
 jvmtiError MethodUtil::GetLocalVariableGeneric(jvmtiEnv* env ATTRIBUTE_UNUSED,
@@ -782,9 +793,12 @@ jvmtiError MethodUtil::GetLocalVariableGeneric(jvmtiEnv* env ATTRIBUTE_UNUSED,
     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
     return err;
   }
-  GetLocalVariableClosure c(self, depth, slot, type, val);
-  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-  if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &c)) {
+  art::ScopedAssertNoThreadSuspension sants("Performing GetLocalVariable");
+  GetLocalVariableClosure c(depth, slot, type, val);
+  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. We
+  // need to avoid suspending as we wait for the checkpoint to occur since we are (potentially)
+  // transferring a GcRoot across threads.
+  if (!target->RequestSynchronousCheckpoint(&c, art::ThreadState::kRunnable)) {
     return ERR(THREAD_NOT_ALIVE);
   } else {
     return c.GetResult();
@@ -798,13 +812,13 @@ class SetLocalVariableClosure : public CommonLocalVariableClosure {
                           jint slot,
                           art::Primitive::Type type,
                           jvalue val)
-      : CommonLocalVariableClosure(caller, depth, slot), type_(type), val_(val) {}
+      : CommonLocalVariableClosure(depth, slot), caller_(caller), type_(type), val_(val) {}
 
 protected:
   jvmtiError GetTypeError(art::ArtMethod* method,
                           art::Primitive::Type slot_type,
                           const std::string& descriptor)
-      OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (slot_type) {
       case art::Primitive::kPrimNot: {
         if (type_ != art::Primitive::kPrimNot) {
@@ -840,7 +854,7 @@ class SetLocalVariableClosure : public CommonLocalVariableClosure {
   }
 
   jvmtiError Execute(art::ArtMethod* method, art::StackVisitor& visitor)
-      OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+      OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
     switch (type_) {
       case art::Primitive::kPrimNot: {
         uint32_t ptr_val;
@@ -887,6 +901,7 @@ class SetLocalVariableClosure : public CommonLocalVariableClosure {
   }
 
 private:
+  art::Thread* caller_;
   art::Primitive::Type type_;
   jvalue val_;
 };
@@ -913,7 +928,7 @@ jvmtiError MethodUtil::SetLocalVariableGeneric(jvmtiEnv* env ATTRIBUTE_UNUSED,
   }
   SetLocalVariableClosure c(self, depth, slot, type, val);
   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-  if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &c)) {
+  if (!target->RequestSynchronousCheckpoint(&c)) {
     return ERR(THREAD_NOT_ALIVE);
   } else {
     return c.GetResult();
@@ -922,13 +937,13 @@ jvmtiError MethodUtil::SetLocalVariableGeneric(jvmtiEnv* env ATTRIBUTE_UNUSED,
 
 class GetLocalInstanceClosure : public art::Closure {
 public:
-  GetLocalInstanceClosure(art::Thread* caller, jint depth, jobject* val)
+  explicit GetLocalInstanceClosure(jint depth)
       : result_(ERR(INTERNAL)),
-        caller_(caller),
         depth_(depth),
-        val_(val) {}
+        val_(nullptr) {}
 
   void Run(art::Thread* self) OVERRIDE REQUIRES(art::Locks::mutator_lock_) {
+    art::ScopedAssertNoThreadSuspension sants("GetLocalInstanceClosure::Run");
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
     std::unique_ptr<art::Context> context(art::Context::Create());
     FindFrameAtDepthVisitor visitor(self, context.get(), depth_);
@@ -939,19 +954,22 @@ class GetLocalInstanceClosure : public art::Closure {
       return;
     }
     result_ = OK;
-    art::ObjPtr<art::mirror::Object> obj = visitor.GetThisObject();
-    *val_ = obj.IsNull() ? nullptr : caller_->GetJniEnv()->AddLocalReference<jobject>(obj);
+    val_ = art::GcRoot<art::mirror::Object>(visitor.GetThisObject());
   }
 
-  jvmtiError GetResult() const {
+  jvmtiError GetResult(jobject* data_out) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+    if (result_ == OK) {
+      *data_out = val_.IsNull()
+          ? nullptr
+          : art::Thread::Current()->GetJniEnv()->AddLocalReference<jobject>(val_.Read());
+    }
     return result_;
   }
 
 private:
   jvmtiError result_;
-  art::Thread* caller_;
   jint depth_;
-  jobject* val_;
+  art::GcRoot<art::mirror::Object> val_;
 };
 
 jvmtiError MethodUtil::GetLocalInstance(jvmtiEnv* env ATTRIBUTE_UNUSED,
@@ -970,12 +988,15 @@ jvmtiError MethodUtil::GetLocalInstance(jvmtiEnv* env ATTRIBUTE_UNUSED,
     art::Locks::thread_list_lock_->ExclusiveUnlock(self);
     return err;
   }
-  GetLocalInstanceClosure c(self, depth, data);
-  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-  if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &c)) {
+  art::ScopedAssertNoThreadSuspension sants("Performing GetLocalInstance");
+  GetLocalInstanceClosure c(depth);
+  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. We
+  // need to avoid suspending as we wait for the checkpoint to occur since we are (potentially)
+  // transferring a GcRoot across threads.
+  if (!target->RequestSynchronousCheckpoint(&c, art::ThreadState::kRunnable)) {
     return ERR(THREAD_NOT_ALIVE);
   } else {
-    return c.GetResult();
+    return c.GetResult(data);
  }
 }
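The recurring shape in this and the following JVMTI changes: a closure that runs on the target thread may not create JNI local references on the caller's behalf, so it parks the object in a GC-visible root and the caller materializes the local reference afterwards, without suspending in between. A stripped-down model of the hand-off, where every type is a simplified stand-in for ART's GcRoot/JNIEnv machinery:

    struct Object {};
    using jobject = Object*;  // stand-in for a JNI local reference

    // Models art::GcRoot<mirror::Object>: a slot the GC knows how to visit.
    struct GcRoot {
      Object* obj = nullptr;
      bool IsNull() const { return obj == nullptr; }
    };

    class GetLocalVariableModel {
     public:
      // Runs on the *target* thread inside the checkpoint: only stash the root.
      void RunOnTarget(Object* value) { obj_val_.obj = value; }

      // Runs on the *caller* thread afterwards: convert the root into a local
      // reference in the caller's own JNI environment (modeled as identity).
      jobject GetResultOnCaller() {
        return obj_val_.IsNull() ? nullptr : AddLocalReference(obj_val_.obj);
      }

     private:
      static jobject AddLocalReference(Object* o) { return o; }  // stand-in
      GcRoot obj_val_;
    };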
diff --git a/openjdkjvmti/ti_monitor.cc b/openjdkjvmti/ti_monitor.cc
index 94408ba186..1cfc64a61d 100644
--- a/openjdkjvmti/ti_monitor.cc
+++ b/openjdkjvmti/ti_monitor.cc
@@ -37,6 +37,7 @@
 #include <mutex>
 
 #include "art_jvmti.h"
+#include "gc_root-inl.h"
 #include "monitor.h"
 #include "runtime.h"
 #include "scoped_thread_state_change-inl.h"
@@ -351,19 +352,17 @@ jvmtiError MonitorUtil::GetCurrentContendedMonitor(jvmtiEnv* env ATTRIBUTE_UNUSE
   }
   struct GetContendedMonitorClosure : public art::Closure {
    public:
-    explicit GetContendedMonitorClosure(art::Thread* current, jobject* out)
-        : result_thread_(current), out_(out) {}
+    GetContendedMonitorClosure() : out_(nullptr) {}
 
     void Run(art::Thread* target_thread) REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      art::ScopedAssertNoThreadSuspension sants("GetContendedMonitorClosure::Run");
       switch (target_thread->GetState()) {
         // These three we are actually currently waiting on a monitor and have sent the appropriate
         // events (if anyone is listening).
         case art::kBlocked:
         case art::kTimedWaiting:
         case art::kWaiting: {
-          art::mirror::Object* mon = art::Monitor::GetContendedMonitor(target_thread);
-          *out_ = (mon == nullptr) ? nullptr
-                                   : result_thread_->GetJniEnv()->AddLocalReference<jobject>(mon);
+          out_ = art::GcRoot<art::mirror::Object>(art::Monitor::GetContendedMonitor(target_thread));
           return;
         }
         case art::kTerminated:
@@ -390,22 +389,30 @@ jvmtiError MonitorUtil::GetCurrentContendedMonitor(jvmtiEnv* env ATTRIBUTE_UNUSE
         case art::kStarting:
         case art::kNative:
         case art::kSuspended: {
-          // We aren't currently (explicitly) waiting for a monitor anything so just return null.
-          *out_ = nullptr;
+          // We aren't currently (explicitly) waiting for a monitor so just return null.
           return;
         }
       }
     }
 
+    jobject GetResult() REQUIRES_SHARED(art::Locks::mutator_lock_) {
+      return out_.IsNull()
+          ? nullptr
+          : art::Thread::Current()->GetJniEnv()->AddLocalReference<jobject>(out_.Read());
+    }
+
    private:
-    art::Thread* result_thread_;
-    jobject* out_;
+    art::GcRoot<art::mirror::Object> out_;
   };
-  GetContendedMonitorClosure closure(self, monitor);
-  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-  if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &closure)) {
+  art::ScopedAssertNoThreadSuspension sants("Performing GetCurrentContendedMonitor");
+  GetContendedMonitorClosure closure;
+  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. We
+  // need to avoid suspending as we wait for the checkpoint to occur since we are (potentially)
+  // transferring a GcRoot across threads.
+  if (!target->RequestSynchronousCheckpoint(&closure, art::ThreadState::kRunnable)) {
     return ERR(THREAD_NOT_ALIVE);
   }
+  *monitor = closure.GetResult();
   return OK;
 }
diff --git a/openjdkjvmti/ti_stack.cc b/openjdkjvmti/ti_stack.cc
index 373944f179..41a649b5e3 100644
--- a/openjdkjvmti/ti_stack.cc
+++ b/openjdkjvmti/ti_stack.cc
@@ -258,7 +258,7 @@ jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
                                        static_cast<size_t>(start_depth),
                                        static_cast<size_t>(max_frame_count));
     // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-    if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
+    if (!thread->RequestSynchronousCheckpoint(&closure)) {
       return ERR(THREAD_NOT_ALIVE);
     }
     *count_ptr = static_cast<jint>(closure.index);
@@ -269,7 +269,7 @@ jvmtiError StackUtil::GetStackTrace(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
   } else {
     GetStackTraceVectorClosure closure(0, 0);
     // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-    if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
+    if (!thread->RequestSynchronousCheckpoint(&closure)) {
       return ERR(THREAD_NOT_ALIVE);
     }
 
@@ -484,7 +484,7 @@ jvmtiError StackUtil::GetThreadListStackTraces(jvmtiEnv* env,
     *stack_info_ptr = nullptr;
     return ERR(NONE);
   }
-  if (stack_info_ptr == nullptr || stack_info_ptr == nullptr) {
+  if (thread_list == nullptr || stack_info_ptr == nullptr) {
     return ERR(NULL_POINTER);
   }
 
@@ -713,7 +713,7 @@ jvmtiError StackUtil::GetFrameCount(jvmtiEnv* env ATTRIBUTE_UNUSED,
 
   GetFrameCountClosure closure;
   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-  if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
+  if (!thread->RequestSynchronousCheckpoint(&closure)) {
     return ERR(THREAD_NOT_ALIVE);
   }
 
@@ -803,7 +803,7 @@ jvmtiError StackUtil::GetFrameLocation(jvmtiEnv* env ATTRIBUTE_UNUSED,
 
   GetLocationClosure closure(static_cast<size_t>(depth));
   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-  if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(thread, &closure)) {
+  if (!thread->RequestSynchronousCheckpoint(&closure)) {
     return ERR(THREAD_NOT_ALIVE);
   }
 
@@ -882,8 +882,8 @@ struct MonitorVisitor : public art::StackVisitor, public art::SingleRootVisitor
 template<typename Fn>
 struct MonitorInfoClosure : public art::Closure {
  public:
-  MonitorInfoClosure(art::ScopedObjectAccess& soa, Fn handle_results)
-      : soa_(soa), err_(OK), handle_results_(handle_results) {}
+  explicit MonitorInfoClosure(Fn handle_results)
+      : err_(OK), handle_results_(handle_results) {}
 
   void Run(art::Thread* target) OVERRIDE REQUIRES_SHARED(art::Locks::mutator_lock_) {
     art::Locks::mutator_lock_->AssertSharedHeld(art::Thread::Current());
@@ -893,7 +893,7 @@ struct MonitorInfoClosure : public art::Closure {
     // Find any other monitors, including ones acquired in native code.
     art::RootInfo root_info(art::kRootVMInternal);
     target->GetJniEnv()->VisitMonitorRoots(&visitor, root_info);
-    err_ = handle_results_(soa_, visitor);
+    err_ = handle_results_(visitor);
   }
 
   jvmtiError GetError() {
@@ -901,17 +901,18 @@ struct MonitorInfoClosure : public art::Closure {
   }
 
 private:
-  art::ScopedObjectAccess& soa_;
   jvmtiError err_;
   Fn handle_results_;
 };
 
 template <typename Fn>
-static jvmtiError GetOwnedMonitorInfoCommon(jthread thread, Fn handle_results) {
+static jvmtiError GetOwnedMonitorInfoCommon(const art::ScopedObjectAccessAlreadyRunnable& soa,
+                                            jthread thread,
+                                            Fn handle_results)
+    REQUIRES_SHARED(art::Locks::mutator_lock_) {
   art::Thread* self = art::Thread::Current();
-  art::ScopedObjectAccess soa(self);
-  MonitorInfoClosure<Fn> closure(soa, handle_results);
+  MonitorInfoClosure<Fn> closure(handle_results);
   bool called_method = false;
   {
     art::Locks::thread_list_lock_->ExclusiveLock(self);
@@ -924,7 +925,7 @@ static jvmtiError GetOwnedMonitorInfoCommon(jthread thread, Fn handle_results) {
     if (target != self) {
       called_method = true;
       // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-      if (!ThreadUtil::RequestGCSafeSynchronousCheckpoint(target, &closure)) {
+      if (!target->RequestSynchronousCheckpoint(&closure)) {
         return ERR(THREAD_NOT_ALIVE);
       }
     } else {
@@ -948,47 +949,64 @@ jvmtiError StackUtil::GetOwnedMonitorStackDepthInfo(jvmtiEnv* env,
   if (info_cnt == nullptr || info_ptr == nullptr) {
     return ERR(NULL_POINTER);
   }
-  auto handle_fun = [&] (art::ScopedObjectAccess& soa, MonitorVisitor& visitor)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) {
-    auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * visitor.monitors.size();
-    jvmtiError err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
-    if (err != OK) {
-      return err;
-    }
-    *info_cnt = visitor.monitors.size();
+  art::ScopedObjectAccess soa(art::Thread::Current());
+  std::vector<art::GcRoot<art::mirror::Object>> mons;
+  std::vector<uint32_t> depths;
+  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
     for (size_t i = 0; i < visitor.monitors.size(); i++) {
-      (*info_ptr)[i] = {
-        soa.Env()->AddLocalReference<jobject>(visitor.monitors[i].Get()),
-        visitor.stack_depths[i]
-      };
+      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
+      depths.push_back(visitor.stack_depths[i]);
     }
     return OK;
   };
-  return GetOwnedMonitorInfoCommon(thread, handle_fun);
+  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
+  if (err != OK) {
+    return err;
+  }
+  auto nbytes = sizeof(jvmtiMonitorStackDepthInfo) * mons.size();
+  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(info_ptr));
+  if (err != OK) {
+    return err;
+  }
+  *info_cnt = mons.size();
+  for (uint32_t i = 0; i < mons.size(); i++) {
+    (*info_ptr)[i] = {
+      soa.AddLocalReference<jobject>(mons[i].Read()),
+      static_cast<jint>(depths[i])
+    };
+  }
+  return err;
 }
 
 jvmtiError StackUtil::GetOwnedMonitorInfo(jvmtiEnv* env,
                                           jthread thread,
                                           jint* owned_monitor_count_ptr,
                                           jobject** owned_monitors_ptr) {
-  if (owned_monitors_ptr == nullptr || owned_monitors_ptr == nullptr) {
+  if (owned_monitor_count_ptr == nullptr || owned_monitors_ptr == nullptr) {
     return ERR(NULL_POINTER);
   }
-  auto handle_fun = [&] (art::ScopedObjectAccess& soa, MonitorVisitor& visitor)
-      REQUIRES_SHARED(art::Locks::mutator_lock_) {
-    auto nbytes = sizeof(jobject) * visitor.monitors.size();
-    jvmtiError err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
-    if (err != OK) {
-      return err;
-    }
-    *owned_monitor_count_ptr = visitor.monitors.size();
+  art::ScopedObjectAccess soa(art::Thread::Current());
+  std::vector<art::GcRoot<art::mirror::Object>> mons;
+  auto handle_fun = [&] (MonitorVisitor& visitor) REQUIRES_SHARED(art::Locks::mutator_lock_) {
     for (size_t i = 0; i < visitor.monitors.size(); i++) {
-      (*owned_monitors_ptr)[i] =
-          soa.Env()->AddLocalReference<jobject>(visitor.monitors[i].Get());
+      mons.push_back(art::GcRoot<art::mirror::Object>(visitor.monitors[i].Get()));
     }
     return OK;
   };
-  return GetOwnedMonitorInfoCommon(thread, handle_fun);
+  jvmtiError err = GetOwnedMonitorInfoCommon(soa, thread, handle_fun);
+  if (err != OK) {
+    return err;
+  }
+  auto nbytes = sizeof(jobject) * mons.size();
+  err = env->Allocate(nbytes, reinterpret_cast<unsigned char**>(owned_monitors_ptr));
+  if (err != OK) {
+    return err;
+  }
+  *owned_monitor_count_ptr = mons.size();
+  for (uint32_t i = 0; i < mons.size(); i++) {
+    (*owned_monitors_ptr)[i] = soa.AddLocalReference<jobject>(mons[i].Read());
+  }
+  return err;
 }
 
 jvmtiError StackUtil::NotifyFramePop(jvmtiEnv* env, jthread thread, jint depth) {
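GetOwnedMonitorInfo and GetOwnedMonitorStackDepthInfo are restructured into two phases: inside the checkpoint the lambda only records roots and depths; env->Allocate and AddLocalReference happen on the caller once the checkpoint returns. A sketch of the caller-side phase under those assumptions (types are simplified stand-ins again):

    #include <cstdint>
    #include <vector>

    struct Object {};
    using jobject = Object*;  // stand-in

    // Phase 1 (inside the closure) fills this; no JNI calls or allocation there.
    struct Collected {
      std::vector<Object*> monitors;   // GcRoots in the real code
      std::vector<uint32_t> depths;
    };

    // Phase 2 (caller side): materialize local references into output storage
    // that the caller allocated after the checkpoint completed.
    void Materialize(const Collected& c, jobject* out_monitors, int32_t* out_depths) {
      for (size_t i = 0; i < c.monitors.size(); ++i) {
        out_monitors[i] = c.monitors[i];  // AddLocalReference in ART
        out_depths[i] = static_cast<int32_t>(c.depths[i]);
      }
    }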
diff --git a/openjdkjvmti/ti_thread.cc b/openjdkjvmti/ti_thread.cc
index 555c5a725b..414139c7b4 100644
--- a/openjdkjvmti/ti_thread.cc
+++ b/openjdkjvmti/ti_thread.cc
@@ -1077,7 +1077,7 @@ jvmtiError ThreadUtil::StopThread(jvmtiEnv* env ATTRIBUTE_UNUSED,
   };
   StopThreadClosure c(exc);
   // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution.
-  if (RequestGCSafeSynchronousCheckpoint(target, &c)) {
+  if (target->RequestSynchronousCheckpoint(&c)) {
     return OK;
   } else {
     // Something went wrong, probably the thread died.
@@ -1100,29 +1100,4 @@ jvmtiError ThreadUtil::InterruptThread(jvmtiEnv* env ATTRIBUTE_UNUSED, jthread t
   return OK;
 }
 
-class GcCriticalSectionClosure : public art::Closure {
- public:
-  explicit GcCriticalSectionClosure(art::Closure* wrapped) : wrapped_(wrapped) {}
-
-  void Run(art::Thread* self) OVERRIDE {
-    if (art::kIsDebugBuild) {
-      art::Locks::thread_list_lock_->AssertNotHeld(art::Thread::Current());
-    }
-    // This might block as it waits for any in-progress GCs to finish but this is fine since we
-    // released the Thread-list-lock prior to calling this in RequestSynchronousCheckpoint.
-    art::gc::ScopedGCCriticalSection sgccs(art::Thread::Current(),
-                                           art::gc::kGcCauseDebugger,
-                                           art::gc::kCollectorTypeDebugger);
-    wrapped_->Run(self);
-  }
-
- private:
-  art::Closure* wrapped_;
-};
-
-bool ThreadUtil::RequestGCSafeSynchronousCheckpoint(art::Thread* thr, art::Closure* function) {
-  GcCriticalSectionClosure gccsc(function);
-  return thr->RequestSynchronousCheckpoint(&gccsc);
-}
-
 }  // namespace openjdkjvmti
diff --git a/openjdkjvmti/ti_thread.h b/openjdkjvmti/ti_thread.h
index 341bffe51e..c6b6af1035 100644
--- a/openjdkjvmti/ti_thread.h
+++ b/openjdkjvmti/ti_thread.h
@@ -134,16 +134,6 @@ class ThreadUtil {
       REQUIRES(!art::Locks::user_code_suspension_lock_,
                !art::Locks::thread_suspend_count_lock_);
 
-  // This will request a synchronous checkpoint in such a way as to prevent gc races if a local
-  // variable is taken from one thread's stack and placed in the stack of another thread.
-  // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is
-  // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to
-  // execute the checkpoint for us if it is Runnable.
-  static bool RequestGCSafeSynchronousCheckpoint(art::Thread* thr, art::Closure* function)
-      REQUIRES_SHARED(art::Locks::mutator_lock_)
-      RELEASE(art::Locks::thread_list_lock_)
-      REQUIRES(!art::Locks::thread_suspend_count_lock_);
-
  private:
   // We need to make sure only one thread tries to suspend threads at a time so we can get the
   // 'suspend-only-once' behavior the spec requires. Internally, ART considers suspension to be a
diff --git a/runtime/barrier.cc b/runtime/barrier.cc
index 4329a5a245..8d3cf450f3 100644
--- a/runtime/barrier.cc
+++ b/runtime/barrier.cc
@@ -31,6 +31,9 @@ Barrier::Barrier(int count)
       condition_("GC barrier condition", lock_) {
 }
 
+template void Barrier::Increment<Barrier::kAllowHoldingLocks>(Thread* self, int delta);
+template void Barrier::Increment<Barrier::kDisallowHoldingLocks>(Thread* self, int delta);
+
 void Barrier::Pass(Thread* self) {
   MutexLock mu(self, lock_);
   SetCountLocked(self, count_ - 1);
@@ -45,6 +48,7 @@ void Barrier::Init(Thread* self, int count) {
   SetCountLocked(self, count);
 }
 
+template <Barrier::LockHandling locks>
 void Barrier::Increment(Thread* self, int delta) {
   MutexLock mu(self, lock_);
   SetCountLocked(self, count_ + delta);
@@ -57,7 +61,11 @@ void Barrier::Increment(Thread* self, int delta) {
   // be decremented to zero and a Broadcast will be made on the
   // condition variable, thus waking this up.
   while (count_ != 0) {
-    condition_.Wait(self);
+    if (locks == kAllowHoldingLocks) {
+      condition_.WaitHoldingLocks(self);
+    } else {
+      condition_.Wait(self);
+    }
   }
 }
diff --git a/runtime/barrier.h b/runtime/barrier.h
index d7c4661b99..8a38c4c310 100644
--- a/runtime/barrier.h
+++ b/runtime/barrier.h
@@ -35,6 +35,11 @@ namespace art {
 // TODO: Maybe give this a better name.
 class Barrier {
  public:
+  enum LockHandling {
+    kAllowHoldingLocks,
+    kDisallowHoldingLocks,
+  };
+
   explicit Barrier(int count);
   virtual ~Barrier();
 
@@ -50,7 +55,9 @@ class Barrier {
   // If these calls are made in that situation, the offending thread is likely to go back
   // to sleep, resulting in a deadlock.
 
-  // Increment the count by delta, wait on condition if count is non zero.
+  // Increment the count by delta, wait on condition if count is non zero. If LockHandling is
+  // kAllowHoldingLocks we will not check that all locks are released when waiting.
+  template <Barrier::LockHandling locks = kDisallowHoldingLocks>
   void Increment(Thread* self, int delta) REQUIRES(!lock_);
 
   // Increment the count by delta, wait on condition if count is non zero, with a timeout. Returns
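The Barrier change turns Increment into a template so callers that must wait while holding other locks can opt out of the debug lock check. A condensed model using standard primitives (ART's Mutex/ConditionVariable differ in detail, and the .cc file explicitly instantiates both variants as shown above):

    #include <condition_variable>
    #include <mutex>

    enum LockHandling { kAllowHoldingLocks, kDisallowHoldingLocks };

    class BarrierModel {
     public:
      template <LockHandling locks = kDisallowHoldingLocks>
      void Increment(int delta) {
        std::unique_lock<std::mutex> mu(lock_);
        count_ += delta;
        // Another thread's Pass()/Wait() drives count_ to zero and broadcasts.
        while (count_ != 0) {
          if (locks == kAllowHoldingLocks) {
            condition_.wait(mu);  // ART: WaitHoldingLocks(self), skips the lock check
          } else {
            condition_.wait(mu);  // ART: Wait(self), asserts no other locks are held
          }
        }
      }

     private:
      std::mutex lock_;
      std::condition_variable condition_;
      int count_ = 0;
    };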
diff --git a/runtime/barrier_test.cc b/runtime/barrier_test.cc
index 04bb6bab1e..88075ba368 100644
--- a/runtime/barrier_test.cc
+++ b/runtime/barrier_test.cc
@@ -69,18 +69,18 @@ TEST_F(BarrierTest, CheckWait) {
     thread_pool.AddTask(self, new CheckWaitTask(&barrier, &count1, &count2));
   }
   thread_pool.StartWorkers(self);
-  while (count1.LoadRelaxed() != num_threads) {
+  while (count1.load(std::memory_order_relaxed) != num_threads) {
     timeout_barrier.Increment(self, 1, 100);  // sleep 100 msecs
   }
   // Count 2 should still be zero since no thread should have gone past the barrier.
-  EXPECT_EQ(0, count2.LoadRelaxed());
+  EXPECT_EQ(0, count2.load(std::memory_order_relaxed));
   // Perform one additional Wait(), allowing pool threads to proceed.
   barrier.Wait(self);
   // Wait for all the threads to finish.
   thread_pool.Wait(self, true, false);
   // Both counts should be equal to num_threads now.
-  EXPECT_EQ(count1.LoadRelaxed(), num_threads);
-  EXPECT_EQ(count2.LoadRelaxed(), num_threads);
+  EXPECT_EQ(count1.load(std::memory_order_relaxed), num_threads);
+  EXPECT_EQ(count2.load(std::memory_order_relaxed), num_threads);
   timeout_barrier.Init(self, 0);  // Reset to zero for destruction.
 }
@@ -124,7 +124,7 @@ TEST_F(BarrierTest, CheckPass) {
   // Wait for all the tasks to complete using the barrier.
   barrier.Increment(self, expected_total_tasks);
   // The total number of completed tasks should be equal to expected_total_tasks.
-  EXPECT_EQ(count.LoadRelaxed(), expected_total_tasks);
+  EXPECT_EQ(count.load(std::memory_order_relaxed), expected_total_tasks);
 }
 
 }  // namespace art
diff --git a/runtime/base/mutex-inl.h b/runtime/base/mutex-inl.h
index d6dbab4606..dfa14b91f0 100644
--- a/runtime/base/mutex-inl.h
+++ b/runtime/base/mutex-inl.h
@@ -161,7 +161,7 @@ inline void ReaderWriterMutex::SharedLock(Thread* self) {
 #if ART_USE_FUTEXES
   bool done = false;
   do {
-    int32_t cur_state = state_.LoadRelaxed();
+    int32_t cur_state = state_.load(std::memory_order_relaxed);
     if (LIKELY(cur_state >= 0)) {
       // Add as an extra reader.
       done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1);
@@ -185,7 +185,7 @@ inline void ReaderWriterMutex::SharedUnlock(Thread* self) {
 #if ART_USE_FUTEXES
   bool done = false;
   do {
-    int32_t cur_state = state_.LoadRelaxed();
+    int32_t cur_state = state_.load(std::memory_order_relaxed);
     if (LIKELY(cur_state > 0)) {
       // Reduce state by 1 and impose lock release load/store ordering.
       // Note, the relaxed loads below musn't reorder before the CompareAndSet.
       // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing
       // a status bit into the state on contention.
       done = state_.CompareAndSetWeakSequentiallyConsistent(cur_state, cur_state - 1);
       if (done && (cur_state - 1) == 0) {  // Weak CAS may fail spuriously.
-        if (num_pending_writers_.LoadRelaxed() > 0 ||
-            num_pending_readers_.LoadRelaxed() > 0) {
+        if (num_pending_writers_.load(std::memory_order_relaxed) > 0 ||
+            num_pending_readers_.load(std::memory_order_relaxed) > 0) {
           // Wake any exclusive waiters as there are now no readers.
           futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
         }
@@ -221,7 +221,7 @@ inline bool Mutex::IsExclusiveHeld(const Thread* self) const {
 }
 
 inline pid_t Mutex::GetExclusiveOwnerTid() const {
-  return exclusive_owner_.LoadRelaxed();
+  return exclusive_owner_.load(std::memory_order_relaxed);
 }
 
 inline void Mutex::AssertExclusiveHeld(const Thread* self) const {
@@ -248,16 +248,16 @@ inline bool ReaderWriterMutex::IsExclusiveHeld(const Thread* self) const {
 
 inline pid_t ReaderWriterMutex::GetExclusiveOwnerTid() const {
 #if ART_USE_FUTEXES
-  int32_t state = state_.LoadRelaxed();
+  int32_t state = state_.load(std::memory_order_relaxed);
   if (state == 0) {
     return 0;  // No owner.
   } else if (state > 0) {
     return -1;  // Shared.
   } else {
-    return exclusive_owner_.LoadRelaxed();
+    return exclusive_owner_.load(std::memory_order_relaxed);
   }
 #else
-  return exclusive_owner_.LoadRelaxed();
+  return exclusive_owner_.load(std::memory_order_relaxed);
 #endif
 }
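All of the mutex changes below follow the fast-path idiom visible in SharedLock above: a relaxed load of the packed state word, then a weak acquire CAS, looping on spurious failure. Distilled to a standalone reader-count sketch (the futex slow path and contention bookkeeping are omitted):

    #include <atomic>
    #include <cstdint>

    class RWStateModel {
     public:
      // state_ >= 0: reader count; state_ == -1: a writer holds the lock.
      bool SharedLockFastPath() {
        bool done = false;
        do {
          int32_t cur_state = state_.load(std::memory_order_relaxed);
          if (cur_state >= 0) {
            // Add as an extra reader; a weak CAS may fail spuriously, so loop.
            done = state_.compare_exchange_weak(cur_state, cur_state + 1,
                                                std::memory_order_acquire);
          } else {
            return false;  // Writer held: the real code parks on a futex here.
          }
        } while (!done);
        return true;
      }

     private:
      std::atomic<int32_t> state_{0};
    };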
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index a1f30b6794..73b464119e 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -128,15 +128,15 @@ class ScopedAllMutexesLock FINAL {
 public:
  explicit ScopedAllMutexesLock(const BaseMutex* mutex) : mutex_(mutex) {
    for (uint32_t i = 0;
-         !gAllMutexData->all_mutexes_guard.CompareAndSetWeakAcquire(0, mutex);
+         !gAllMutexData->all_mutexes_guard.CompareAndSetWeakAcquire(nullptr, mutex);
         ++i) {
      BackOff(i);
    }
  }
 
  ~ScopedAllMutexesLock() {
-    DCHECK_EQ(gAllMutexData->all_mutexes_guard.LoadRelaxed(), mutex_);
-    gAllMutexData->all_mutexes_guard.StoreRelease(0);
+    DCHECK_EQ(gAllMutexData->all_mutexes_guard.load(std::memory_order_relaxed), mutex_);
+    gAllMutexData->all_mutexes_guard.store(nullptr, std::memory_order_release);
  }
 
 private:
@@ -147,15 +147,17 @@ class Locks::ScopedExpectedMutexesOnWeakRefAccessLock FINAL {
 public:
  explicit ScopedExpectedMutexesOnWeakRefAccessLock(const BaseMutex* mutex) : mutex_(mutex) {
    for (uint32_t i = 0;
-         !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareAndSetWeakAcquire(0, mutex);
+         !Locks::expected_mutexes_on_weak_ref_access_guard_.CompareAndSetWeakAcquire(nullptr,
+                                                                                     mutex);
         ++i) {
      BackOff(i);
    }
  }
 
  ~ScopedExpectedMutexesOnWeakRefAccessLock() {
-    DCHECK_EQ(Locks::expected_mutexes_on_weak_ref_access_guard_.LoadRelaxed(), mutex_);
-    Locks::expected_mutexes_on_weak_ref_access_guard_.StoreRelease(0);
+    DCHECK_EQ(Locks::expected_mutexes_on_weak_ref_access_guard_.load(std::memory_order_relaxed),
+              mutex_);
+    Locks::expected_mutexes_on_weak_ref_access_guard_.store(nullptr, std::memory_order_release);
  }
 
 private:
@@ -293,7 +295,7 @@ void BaseMutex::CheckSafeToWait(Thread* self) {
 void BaseMutex::ContentionLogData::AddToWaitTime(uint64_t value) {
   if (kLogLockContentions) {
     // Atomically add value to wait_time.
-    wait_time.FetchAndAddSequentiallyConsistent(value);
+    wait_time.fetch_add(value, std::memory_order_seq_cst);
   }
 }
 
@@ -306,19 +308,19 @@ void BaseMutex::RecordContention(uint64_t blocked_tid,
     data->AddToWaitTime(nano_time_blocked);
     ContentionLogEntry* log = data->contention_log;
     // This code is intentionally racy as it is only used for diagnostics.
-    uint32_t slot = data->cur_content_log_entry.LoadRelaxed();
+    int32_t slot = data->cur_content_log_entry.load(std::memory_order_relaxed);
     if (log[slot].blocked_tid == blocked_tid &&
         log[slot].owner_tid == blocked_tid) {
       ++log[slot].count;
     } else {
       uint32_t new_slot;
       do {
-        slot = data->cur_content_log_entry.LoadRelaxed();
+        slot = data->cur_content_log_entry.load(std::memory_order_relaxed);
         new_slot = (slot + 1) % kContentionLogSize;
       } while (!data->cur_content_log_entry.CompareAndSetWeakRelaxed(slot, new_slot));
       log[new_slot].blocked_tid = blocked_tid;
       log[new_slot].owner_tid = owner_tid;
-      log[new_slot].count.StoreRelaxed(1);
+      log[new_slot].count.store(1, std::memory_order_relaxed);
     }
   }
 }
@@ -327,8 +329,8 @@ void BaseMutex::DumpContention(std::ostream& os) const {
   if (kLogLockContentions) {
     const ContentionLogData* data = contention_log_data_;
     const ContentionLogEntry* log = data->contention_log;
-    uint64_t wait_time = data->wait_time.LoadRelaxed();
-    uint32_t contention_count = data->contention_count.LoadRelaxed();
+    uint64_t wait_time = data->wait_time.load(std::memory_order_relaxed);
+    uint32_t contention_count = data->contention_count.load(std::memory_order_relaxed);
     if (contention_count == 0) {
       os << "never contended";
     } else {
@@ -340,7 +342,7 @@ void BaseMutex::DumpContention(std::ostream& os) const {
       for (size_t i = 0; i < kContentionLogSize; ++i) {
         uint64_t blocked_tid = log[i].blocked_tid;
         uint64_t owner_tid = log[i].owner_tid;
-        uint32_t count = log[i].count.LoadRelaxed();
+        uint32_t count = log[i].count.load(std::memory_order_relaxed);
         if (count > 0) {
           auto it = most_common_blocked.find(blocked_tid);
           if (it != most_common_blocked.end()) {
@@ -386,8 +388,8 @@ void BaseMutex::DumpContention(std::ostream& os) const {
 Mutex::Mutex(const char* name, LockLevel level, bool recursive)
     : BaseMutex(name, level), exclusive_owner_(0), recursive_(recursive), recursion_count_(0) {
 #if ART_USE_FUTEXES
-  DCHECK_EQ(0, state_.LoadRelaxed());
-  DCHECK_EQ(0, num_contenders_.LoadRelaxed());
+  DCHECK_EQ(0, state_.load(std::memory_order_relaxed));
+  DCHECK_EQ(0, num_contenders_.load(std::memory_order_relaxed));
 #else
   CHECK_MUTEX_CALL(pthread_mutex_init, (&mutex_, nullptr));
 #endif
@@ -402,7 +404,7 @@ static bool IsSafeToCallAbortSafe() {
 Mutex::~Mutex() {
   bool safe_to_call_abort = Locks::IsSafeToCallAbortRacy();
 #if ART_USE_FUTEXES
-  if (state_.LoadRelaxed() != 0) {
+  if (state_.load(std::memory_order_relaxed) != 0) {
     LOG(safe_to_call_abort ? FATAL : WARNING)
         << "destroying mutex with owner: " << GetExclusiveOwnerTid();
   } else {
@@ -410,7 +412,7 @@ Mutex::~Mutex() {
       LOG(safe_to_call_abort ? FATAL : WARNING)
          << "unexpectedly found an owner on unlocked mutex " << name_;
     }
-    if (num_contenders_.LoadSequentiallyConsistent() != 0) {
+    if (num_contenders_.load(std::memory_order_seq_cst) != 0) {
       LOG(safe_to_call_abort ? FATAL : WARNING)
          << "unexpectedly found a contender on mutex " << name_;
     }
@@ -436,7 +438,7 @@ void Mutex::ExclusiveLock(Thread* self) {
 #if ART_USE_FUTEXES
     bool done = false;
     do {
-      int32_t cur_state = state_.LoadRelaxed();
+      int32_t cur_state = state_.load(std::memory_order_relaxed);
       if (LIKELY(cur_state == 0)) {
         // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
         done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, 1 /* new state */);
@@ -457,12 +459,12 @@ void Mutex::ExclusiveLock(Thread* self) {
           num_contenders_--;
         }
       }
     } while (!done);
-    DCHECK_EQ(state_.LoadRelaxed(), 1);
+    DCHECK_EQ(state_.load(std::memory_order_relaxed), 1);
 #else
     CHECK_MUTEX_CALL(pthread_mutex_lock, (&mutex_));
 #endif
     DCHECK_EQ(GetExclusiveOwnerTid(), 0);
-    exclusive_owner_.StoreRelaxed(SafeGetTid(self));
+    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
     RegisterAsLocked(self);
   }
   recursion_count_++;
@@ -482,7 +484,7 @@ bool Mutex::ExclusiveTryLock(Thread* self) {
 #if ART_USE_FUTEXES
     bool done = false;
     do {
-      int32_t cur_state = state_.LoadRelaxed();
+      int32_t cur_state = state_.load(std::memory_order_relaxed);
       if (cur_state == 0) {
         // Change state from 0 to 1 and impose load/store ordering appropriate for lock acquisition.
         done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, 1 /* new state */);
@@ -490,7 +492,7 @@ bool Mutex::ExclusiveTryLock(Thread* self) {
         return false;
       }
     } while (!done);
-    DCHECK_EQ(state_.LoadRelaxed(), 1);
+    DCHECK_EQ(state_.load(std::memory_order_relaxed), 1);
 #else
     int result = pthread_mutex_trylock(&mutex_);
     if (result == EBUSY) {
@@ -502,7 +504,7 @@ bool Mutex::ExclusiveTryLock(Thread* self) {
     }
 #endif
     DCHECK_EQ(GetExclusiveOwnerTid(), 0);
-    exclusive_owner_.StoreRelaxed(SafeGetTid(self));
+    exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed);
     RegisterAsLocked(self);
   }
   recursion_count_++;
DCHECK(should_respond_to_empty_checkpoint_request_); - if (UNLIKELY(num_contenders_.LoadRelaxed() > 0)) { + if (UNLIKELY(num_contenders_.load(std::memory_order_relaxed) > 0)) { futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0); } #else @@ -610,15 +612,15 @@ ReaderWriterMutex::ReaderWriterMutex(const char* name, LockLevel level) #if !ART_USE_FUTEXES CHECK_MUTEX_CALL(pthread_rwlock_init, (&rwlock_, nullptr)); #endif - exclusive_owner_.StoreRelaxed(0); + exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); } ReaderWriterMutex::~ReaderWriterMutex() { #if ART_USE_FUTEXES - CHECK_EQ(state_.LoadRelaxed(), 0); + CHECK_EQ(state_.load(std::memory_order_relaxed), 0); CHECK_EQ(GetExclusiveOwnerTid(), 0); - CHECK_EQ(num_pending_readers_.LoadRelaxed(), 0); - CHECK_EQ(num_pending_writers_.LoadRelaxed(), 0); + CHECK_EQ(num_pending_readers_.load(std::memory_order_relaxed), 0); + CHECK_EQ(num_pending_writers_.load(std::memory_order_relaxed), 0); #else // We can't use CHECK_MUTEX_CALL here because on shutdown a suspended daemon thread // may still be using locks. @@ -637,7 +639,7 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) { #if ART_USE_FUTEXES bool done = false; do { - int32_t cur_state = state_.LoadRelaxed(); + int32_t cur_state = state_.load(std::memory_order_relaxed); if (LIKELY(cur_state == 0)) { // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition. done = state_.CompareAndSetWeakAcquire(0 /* cur_state*/, -1 /* new state */); @@ -658,12 +660,12 @@ void ReaderWriterMutex::ExclusiveLock(Thread* self) { --num_pending_writers_; } } while (!done); - DCHECK_EQ(state_.LoadRelaxed(), -1); + DCHECK_EQ(state_.load(std::memory_order_relaxed), -1); #else CHECK_MUTEX_CALL(pthread_rwlock_wrlock, (&rwlock_)); #endif DCHECK_EQ(GetExclusiveOwnerTid(), 0); - exclusive_owner_.StoreRelaxed(SafeGetTid(self)); + exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed); RegisterAsLocked(self); AssertExclusiveHeld(self); } @@ -676,10 +678,10 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) { #if ART_USE_FUTEXES bool done = false; do { - int32_t cur_state = state_.LoadRelaxed(); + int32_t cur_state = state_.load(std::memory_order_relaxed); if (LIKELY(cur_state == -1)) { // We're no longer the owner. - exclusive_owner_.StoreRelaxed(0); + exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); // Change state from -1 to 0 and impose load/store ordering appropriate for lock release. // Note, the relaxed loads below mustn't reorder before the CompareAndSet. // TODO: the ordering here is non-trivial as state is split across 3 fields, fix by placing @@ -687,8 +689,8 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) { done = state_.CompareAndSetWeakSequentiallyConsistent(-1 /* cur_state*/, 0 /* new state */); if (LIKELY(done)) { // Weak CAS may fail spuriously. // Wake any waiters.
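// [Editor's note] The unlock paths in both mutex classes follow the same
// futex pattern: release the lock word with a CAS, then issue a wake only if
// the contender count is non-zero. A hedged, Linux-only sketch with
// std::atomic and a raw futex(2) call (error handling omitted):
#include <atomic>
#include <cstdint>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

void UnlockAndWakeSketch(std::atomic<int32_t>& state,
                         std::atomic<int32_t>& num_contenders) {
  int32_t locked = 1;
  // seq_cst keeps the relaxed contender load below from moving ahead of the
  // CAS, the same constraint the in-tree comments call out.
  if (state.compare_exchange_strong(locked, 0, std::memory_order_seq_cst)) {
    if (num_contenders.load(std::memory_order_relaxed) > 0) {
      syscall(SYS_futex, reinterpret_cast<int32_t*>(&state), FUTEX_WAKE,
              1 /* wake one contender */, nullptr, nullptr, 0);
    }
  }
}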
- if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 || - num_pending_writers_.LoadRelaxed() > 0)) { + if (UNLIKELY(num_pending_readers_.load(std::memory_order_relaxed) > 0 || + num_pending_writers_.load(std::memory_order_relaxed) > 0)) { futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0); } } @@ -697,7 +699,7 @@ void ReaderWriterMutex::ExclusiveUnlock(Thread* self) { } } while (!done); #else - exclusive_owner_.StoreRelaxed(0); + exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); CHECK_MUTEX_CALL(pthread_rwlock_unlock, (&rwlock_)); #endif } @@ -710,7 +712,7 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32 timespec end_abs_ts; InitTimeSpec(true, CLOCK_MONOTONIC, ms, ns, &end_abs_ts); do { - int32_t cur_state = state_.LoadRelaxed(); + int32_t cur_state = state_.load(std::memory_order_relaxed); if (cur_state == 0) { // Change state from 0 to -1 and impose load/store ordering appropriate for lock acquisition. done = state_.CompareAndSetWeakAcquire(0 /* cur_state */, -1 /* new state */); @@ -753,7 +755,7 @@ bool ReaderWriterMutex::ExclusiveLockWithTimeout(Thread* self, int64_t ms, int32 PLOG(FATAL) << "pthread_rwlock_timedwrlock failed for " << name_; } #endif - exclusive_owner_.StoreRelaxed(SafeGetTid(self)); + exclusive_owner_.store(SafeGetTid(self), std::memory_order_relaxed); RegisterAsLocked(self); AssertSharedHeld(self); return true; @@ -782,7 +784,7 @@ bool ReaderWriterMutex::SharedTryLock(Thread* self) { #if ART_USE_FUTEXES bool done = false; do { - int32_t cur_state = state_.LoadRelaxed(); + int32_t cur_state = state_.load(std::memory_order_relaxed); if (cur_state >= 0) { // Add as an extra reader and impose load/store ordering appropriate for lock acquisition. done = state_.CompareAndSetWeakAcquire(cur_state, cur_state + 1); @@ -822,9 +824,9 @@ void ReaderWriterMutex::Dump(std::ostream& os) const { << " level=" << static_cast<int>(level_) << " owner=" << GetExclusiveOwnerTid() #if ART_USE_FUTEXES - << " state=" << state_.LoadSequentiallyConsistent() - << " num_pending_writers=" << num_pending_writers_.LoadSequentiallyConsistent() - << " num_pending_readers=" << num_pending_readers_.LoadSequentiallyConsistent() + << " state=" << state_.load(std::memory_order_seq_cst) + << " num_pending_writers=" << num_pending_writers_.load(std::memory_order_seq_cst) + << " num_pending_readers=" << num_pending_readers_.load(std::memory_order_seq_cst) #endif << " "; DumpContention(os); @@ -844,8 +846,8 @@ void ReaderWriterMutex::WakeupToRespondToEmptyCheckpoint() { #if ART_USE_FUTEXES // Wake up all the waiters so they will respond to the empty checkpoint. DCHECK(should_respond_to_empty_checkpoint_request_); - if (UNLIKELY(num_pending_readers_.LoadRelaxed() > 0 || - num_pending_writers_.LoadRelaxed() > 0)) { + if (UNLIKELY(num_pending_readers_.load(std::memory_order_relaxed) > 0 || + num_pending_writers_.load(std::memory_order_relaxed) > 0)) { futex(state_.Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0); } #else @@ -856,7 +858,7 @@ ConditionVariable::ConditionVariable(const char* name, Mutex& guard) : name_(name), guard_(guard) { #if ART_USE_FUTEXES - DCHECK_EQ(0, sequence_.LoadRelaxed()); + DCHECK_EQ(0, sequence_.load(std::memory_order_relaxed)); num_waiters_ = 0; #else pthread_condattr_t cond_attrs; @@ -899,7 +901,7 @@ void ConditionVariable::Broadcast(Thread* self) { sequence_++; // Indicate the broadcast occurred.
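// [Editor's note] Broadcast bumps sequence_ and then, in the loop below,
// moves all waiters from the condvar futex onto the guard mutex futex, so
// they wake one at a time as the mutex is handed off instead of stampeding.
// A hedged, Linux-only sketch of the requeue step:
#include <atomic>
#include <climits>
#include <cstdint>
#include <ctime>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

void BroadcastSketch(std::atomic<int32_t>& sequence,
                     std::atomic<int32_t>& guard_state) {
  sequence.fetch_add(1, std::memory_order_relaxed);  // "broadcast occurred"
  bool done = false;
  do {
    int32_t cur_sequence = sequence.load(std::memory_order_relaxed);
    // Wake 0 waiters, requeue up to INT_MAX of them onto the mutex futex;
    // the kernel rechecks that *sequence still equals cur_sequence and fails
    // with EAGAIN otherwise, in which case we retry with the fresh value.
    done = syscall(SYS_futex, reinterpret_cast<int32_t*>(&sequence),
                   FUTEX_CMP_REQUEUE, 0,
                   reinterpret_cast<const timespec*>(uintptr_t{INT_MAX}),
                   reinterpret_cast<int32_t*>(&guard_state),
                   cur_sequence) != -1;
  } while (!done);
}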
bool done = false; do { - int32_t cur_sequence = sequence_.LoadRelaxed(); + int32_t cur_sequence = sequence_.load(std::memory_order_relaxed); // Requeue waiters onto mutex. The waiter holds the contender count on the mutex high ensuring // mutex unlocks will awaken the requeued waiter thread. done = futex(sequence_.Address(), FUTEX_CMP_REQUEUE, 0, @@ -948,7 +950,7 @@ void ConditionVariable::WaitHoldingLocks(Thread* self) { // Ensure the Mutex is contended so that requeued threads are awoken. guard_.num_contenders_++; guard_.recursion_count_ = 1; - int32_t cur_sequence = sequence_.LoadRelaxed(); + int32_t cur_sequence = sequence_.load(std::memory_order_relaxed); guard_.ExclusiveUnlock(self); if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, nullptr, nullptr, 0) != 0) { // Futex failed, check it is an expected error. @@ -974,14 +976,14 @@ void ConditionVariable::WaitHoldingLocks(Thread* self) { CHECK_GE(num_waiters_, 0); num_waiters_--; // We awoke and so no longer require awakes from the guard_'s unlock. - CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0); + CHECK_GE(guard_.num_contenders_.load(std::memory_order_relaxed), 0); guard_.num_contenders_--; #else pid_t old_owner = guard_.GetExclusiveOwnerTid(); - guard_.exclusive_owner_.StoreRelaxed(0); + guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); guard_.recursion_count_ = 0; CHECK_MUTEX_CALL(pthread_cond_wait, (&cond_, &guard_.mutex_)); - guard_.exclusive_owner_.StoreRelaxed(old_owner); + guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed); #endif guard_.recursion_count_ = old_recursion_count; } @@ -999,7 +1001,7 @@ bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) { // Ensure the Mutex is contended so that requeued threads are awoken. guard_.num_contenders_++; guard_.recursion_count_ = 1; - int32_t cur_sequence = sequence_.LoadRelaxed(); + int32_t cur_sequence = sequence_.load(std::memory_order_relaxed); guard_.ExclusiveUnlock(self); if (futex(sequence_.Address(), FUTEX_WAIT, cur_sequence, &rel_ts, nullptr, 0) != 0) { if (errno == ETIMEDOUT) { @@ -1015,7 +1017,7 @@ bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) { CHECK_GE(num_waiters_, 0); num_waiters_--; // We awoke and so no longer require awakes from the guard_'s unlock. 
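// [Editor's note] Both wait paths (WaitHoldingLocks and TimedWait) use the
// classic futex condvar protocol: raise the guard mutex's contender count so
// unlockers keep issuing wakes, snapshot sequence_, drop the mutex, then
// FUTEX_WAIT on sequence_ with the snapshot as the expected value. A hedged
// sketch; `lock`/`unlock` stand in for the guard mutex operations:
#include <atomic>
#include <cerrno>
#include <cstdint>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

void WaitSketch(std::atomic<int32_t>& sequence,
                std::atomic<int32_t>& num_contenders,
                void (*unlock)(), void (*lock)()) {
  num_contenders.fetch_add(1, std::memory_order_relaxed);
  int32_t cur_sequence = sequence.load(std::memory_order_relaxed);
  unlock();
  if (syscall(SYS_futex, reinterpret_cast<int32_t*>(&sequence), FUTEX_WAIT,
              cur_sequence, nullptr, nullptr, 0) != 0) {
    // EAGAIN: a broadcast already bumped the sequence, so don't sleep.
    // EINTR: spurious signal wakeup. Anything else is a real error.
    if (errno != EAGAIN && errno != EINTR) { /* abort in real code */ }
  }
  lock();  // requeued waiters wake up owning nothing; reacquire first
  num_contenders.fetch_sub(1, std::memory_order_relaxed);
}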
- CHECK_GE(guard_.num_contenders_.LoadRelaxed(), 0); + CHECK_GE(guard_.num_contenders_.load(std::memory_order_relaxed), 0); guard_.num_contenders_--; #else #if !defined(__APPLE__) @@ -1024,7 +1026,7 @@ bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) { int clock = CLOCK_REALTIME; #endif pid_t old_owner = guard_.GetExclusiveOwnerTid(); - guard_.exclusive_owner_.StoreRelaxed(0); + guard_.exclusive_owner_.store(0 /* pid */, std::memory_order_relaxed); guard_.recursion_count_ = 0; timespec ts; InitTimeSpec(true, clock, ms, ns, &ts); @@ -1035,7 +1037,7 @@ bool ConditionVariable::TimedWait(Thread* self, int64_t ms, int32_t ns) { errno = rc; PLOG(FATAL) << "TimedWait failed for " << name_; } - guard_.exclusive_owner_.StoreRelaxed(old_owner); + guard_.exclusive_owner_.store(old_owner, std::memory_order_relaxed); #endif guard_.recursion_count_ = old_recursion_count; return timed_out; @@ -1254,12 +1256,13 @@ void Locks::InitConditions() { } void Locks::SetClientCallback(ClientCallback* safe_to_call_abort_cb) { - safe_to_call_abort_callback.StoreRelease(safe_to_call_abort_cb); + safe_to_call_abort_callback.store(safe_to_call_abort_cb, std::memory_order_release); } // Helper to allow checking shutdown while ignoring locking requirements. bool Locks::IsSafeToCallAbortRacy() { - Locks::ClientCallback* safe_to_call_abort_cb = safe_to_call_abort_callback.LoadAcquire(); + Locks::ClientCallback* safe_to_call_abort_cb = + safe_to_call_abort_callback.load(std::memory_order_acquire); return safe_to_call_abort_cb != nullptr && safe_to_call_abort_cb(); } diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h index 437661798f..b0eb23d327 100644 --- a/runtime/base/mutex.h +++ b/runtime/base/mutex.h @@ -224,7 +224,7 @@ class BaseMutex { public: bool HasEverContended() const { if (kLogLockContentions) { - return contention_log_data_->contention_count.LoadSequentiallyConsistent() > 0; + return contention_log_data_->contention_count.load(std::memory_order_seq_cst) > 0; } return false; } diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 8a29ff33b7..8b64b8def0 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -1340,7 +1340,7 @@ void AppImageClassLoadersAndDexCachesHelper::Update( } } } - { + if (ClassLinker::kAppImageMayContainStrings) { // Fixup all the literal strings happens at app images which are supposed to be interned. ScopedTrace timing("Fixup String Intern in image and dex_cache"); const auto& image_header = space->GetImageHeader(); @@ -5863,14 +5863,6 @@ bool ClassLinker::LinkVirtualMethods( // smaller as we go on. uint32_t hash_index = hash_table.FindAndRemove(&super_method_name_comparator); if (hash_index != hash_table.GetNotFoundIndex()) { - // Run a check whether we are going to override a method which is hidden - // to `klass`, but ignore the result as we only warn at the moment. - // We cannot do this test earlier because we need to establish that - // a method is being overridden first. ShouldBlockAccessToMember would - // print bogus warnings otherwise. 
- hiddenapi::ShouldBlockAccessToMember( - super_method, klass->GetClassLoader(), hiddenapi::kOverride); - ArtMethod* virtual_method = klass->GetVirtualMethodDuringLinking( hash_index, image_pointer_size_); if (super_method->IsFinal()) { diff --git a/runtime/class_linker.h b/runtime/class_linker.h index d05e78fb40..2f6b754521 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -152,6 +152,8 @@ class ClassLinker { kClassRootsMax, }; + static constexpr bool kAppImageMayContainStrings = false; + explicit ClassLinker(InternTable* intern_table); virtual ~ClassLinker(); diff --git a/runtime/class_table-inl.h b/runtime/class_table-inl.h index c59e2e881d..5da5470c1a 100644 --- a/runtime/class_table-inl.h +++ b/runtime/class_table-inl.h @@ -88,7 +88,7 @@ bool ClassTable::Visit(const Visitor& visitor) { template<ReadBarrierOption kReadBarrierOption> inline mirror::Class* ClassTable::TableSlot::Read() const { - const uint32_t before = data_.LoadRelaxed(); + const uint32_t before = data_.load(std::memory_order_relaxed); ObjPtr<mirror::Class> const before_ptr(ExtractPtr(before)); ObjPtr<mirror::Class> const after_ptr( GcRoot<mirror::Class>(before_ptr).Read<kReadBarrierOption>()); @@ -102,7 +102,7 @@ inline mirror::Class* ClassTable::TableSlot::Read() const { template<typename Visitor> inline void ClassTable::TableSlot::VisitRoot(const Visitor& visitor) const { - const uint32_t before = data_.LoadRelaxed(); + const uint32_t before = data_.load(std::memory_order_relaxed); ObjPtr<mirror::Class> before_ptr(ExtractPtr(before)); GcRoot<mirror::Class> root(before_ptr); visitor.VisitRoot(root.AddressWithoutBarrier()); diff --git a/runtime/class_table.h b/runtime/class_table.h index 3e90fe2768..0b08041dbd 100644 --- a/runtime/class_table.h +++ b/runtime/class_table.h @@ -53,14 +53,14 @@ class ClassTable { public: TableSlot() : data_(0u) {} - TableSlot(const TableSlot& copy) : data_(copy.data_.LoadRelaxed()) {} + TableSlot(const TableSlot& copy) : data_(copy.data_.load(std::memory_order_relaxed)) {} explicit TableSlot(ObjPtr<mirror::Class> klass); TableSlot(ObjPtr<mirror::Class> klass, uint32_t descriptor_hash); TableSlot& operator=(const TableSlot& copy) { - data_.StoreRelaxed(copy.data_.LoadRelaxed()); + data_.store(copy.data_.load(std::memory_order_relaxed), std::memory_order_relaxed); return *this; } @@ -69,7 +69,7 @@ class ClassTable { } uint32_t Hash() const { - return MaskHash(data_.LoadRelaxed()); + return MaskHash(data_.load(std::memory_order_relaxed)); } static uint32_t MaskHash(uint32_t hash) { diff --git a/runtime/dex/art_dex_file_loader.cc b/runtime/dex/art_dex_file_loader.cc index c456764834..9802c6904b 100644 --- a/runtime/dex/art_dex_file_loader.cc +++ b/runtime/dex/art_dex_file_loader.cc @@ -205,6 +205,12 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::Open(const std::string& locatio error_msg, std::make_unique<MemMapContainer>(std::move(map)), /*verify_result*/ nullptr); + // Opening CompactDex is only supported from vdex files. + if (dex_file != nullptr && dex_file->IsCompactDexFile()) { + *error_msg = StringPrintf("Opening CompactDex file '%s' is only supported from vdex files", + location.c_str()); + return nullptr; + } return dex_file; } @@ -329,6 +335,12 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenFile(int fd, std::make_unique<MemMapContainer>(std::move(map)), /*verify_result*/ nullptr); + // Opening CompactDex is only supported from vdex files. 
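// [Editor's note] The same CompactDex rejection block is added to all three
// loader entry points in this file (Open from memory, OpenFile, and
// OpenOneDexFileFromZip). A hedged sketch of how the duplicated check could
// be factored; RejectCompactDex is a hypothetical helper, not part of the
// patch:
#include <string>

#include "android-base/stringprintf.h"

template <typename DexFilePtr>
bool RejectCompactDex(const DexFilePtr& dex_file, const std::string& location,
                      std::string* error_msg) {
  // CompactDex is produced by dex2oat; loading it standalone, rather than
  // through a vdex file, is now refused up front.
  if (dex_file != nullptr && dex_file->IsCompactDexFile()) {
    *error_msg = android::base::StringPrintf(
        "Opening CompactDex file '%s' is only supported from vdex files",
        location.c_str());
    return true;  // caller should return nullptr
  }
  return false;
}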
+ if (dex_file != nullptr && dex_file->IsCompactDexFile()) { + *error_msg = StringPrintf("Opening CompactDex file '%s' is only supported from vdex files", + location.c_str()); + return nullptr; + } return dex_file; } @@ -397,6 +409,11 @@ std::unique_ptr<const DexFile> ArtDexFileLoader::OpenOneDexFileFromZip( error_msg, std::make_unique<MemMapContainer>(std::move(map)), &verify_result); + if (dex_file != nullptr && dex_file->IsCompactDexFile()) { + *error_msg = StringPrintf("Opening CompactDex file '%s' is only supported from vdex files", + location.c_str()); + return nullptr; + } if (dex_file == nullptr) { if (verify_result == VerifyResult::kVerifyNotAttempted) { *error_code = ZipOpenErrorCode::kDexFileError; diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index 7a0850d4b8..2284100564 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -2338,10 +2338,6 @@ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** ArtMethod* called = *sp; DCHECK(called->IsNative()) << called->PrettyMethod(true); Runtime* runtime = Runtime::Current(); - jit::Jit* jit = runtime->GetJit(); - if (jit != nullptr) { - jit->AddSamples(self, called, 1u, /*with_backedges*/ false); - } uint32_t shorty_len = 0; const char* shorty = called->GetShorty(&shorty_len); bool critical_native = called->IsCriticalNative(); @@ -2367,6 +2363,12 @@ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, ArtMethod** self->VerifyStack(); + // We can now walk the stack if needed by JIT GC from MethodEntered() for JIT-on-first-use. + jit::Jit* jit = runtime->GetJit(); + if (jit != nullptr) { + jit->MethodEntered(self, called); + } + uint32_t cookie; uint32_t* sp32; // Skip calling JniMethodStart for @CriticalNative. diff --git a/runtime/gc/accounting/atomic_stack.h b/runtime/gc/accounting/atomic_stack.h index 6b103bfe1b..7a4bd87b12 100644 --- a/runtime/gc/accounting/atomic_stack.h +++ b/runtime/gc/accounting/atomic_stack.h @@ -74,8 +74,8 @@ class AtomicStack { void Reset() { DCHECK(mem_map_.get() != nullptr); DCHECK(begin_ != nullptr); - front_index_.StoreRelaxed(0); - back_index_.StoreRelaxed(0); + front_index_.store(0, std::memory_order_relaxed); + back_index_.store(0, std::memory_order_relaxed); debug_is_sorted_ = true; mem_map_->MadviseDontNeedAndZero(); } @@ -103,7 +103,7 @@ class AtomicStack { int32_t index; int32_t new_index; do { - index = back_index_.LoadRelaxed(); + index = back_index_.load(std::memory_order_relaxed); new_index = index + num_slots; if (UNLIKELY(static_cast<size_t>(new_index) >= growth_limit_)) { // Stack overflow. @@ -134,31 +134,32 @@ class AtomicStack { if (kIsDebugBuild) { debug_is_sorted_ = false; } - const int32_t index = back_index_.LoadRelaxed(); + const int32_t index = back_index_.load(std::memory_order_relaxed); DCHECK_LT(static_cast<size_t>(index), growth_limit_); - back_index_.StoreRelaxed(index + 1); + back_index_.store(index + 1, std::memory_order_relaxed); begin_[index].Assign(value); } T* PopBack() REQUIRES_SHARED(Locks::mutator_lock_) { - DCHECK_GT(back_index_.LoadRelaxed(), front_index_.LoadRelaxed()); + DCHECK_GT(back_index_.load(std::memory_order_relaxed), + front_index_.load(std::memory_order_relaxed)); // Decrement the back index non atomically. 
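// [Editor's note] AtomicStack deliberately mixes two access modes: the
// contended producer path claims slots with a CAS, while Reset/PopBack/
// PopFront run in phases where the stack is not shared and get away with
// relaxed load/store pairs. A hedged, self-contained sketch of both modes,
// assuming a fixed capacity `limit`:
#include <atomic>
#include <cstdint>

struct IndexSketch {
  std::atomic<int32_t> back_index{0};

  bool TryAtomicPush(int32_t limit, int32_t* out_slot) {  // cf. AtomicPushBack
    int32_t index;
    do {
      index = back_index.load(std::memory_order_relaxed);
      if (index >= limit) {
        return false;  // stack overflow
      }
    } while (!back_index.compare_exchange_weak(index, index + 1,
                                               std::memory_order_relaxed));
    *out_slot = index;
    return true;
  }

  int32_t NonAtomicPop() {  // cf. PopBack above: relaxed, single-owner phase
    int32_t index = back_index.load(std::memory_order_relaxed) - 1;
    back_index.store(index, std::memory_order_relaxed);
    return index;
  }
};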
- back_index_.StoreRelaxed(back_index_.LoadRelaxed() - 1); - return begin_[back_index_.LoadRelaxed()].AsMirrorPtr(); + back_index_.store(back_index_.load(std::memory_order_relaxed) - 1, std::memory_order_relaxed); + return begin_[back_index_.load(std::memory_order_relaxed)].AsMirrorPtr(); } // Take an item from the front of the stack. T PopFront() { - int32_t index = front_index_.LoadRelaxed(); - DCHECK_LT(index, back_index_.LoadRelaxed()); - front_index_.StoreRelaxed(index + 1); + int32_t index = front_index_.load(std::memory_order_relaxed); + DCHECK_LT(index, back_index_.load(std::memory_order_relaxed)); + front_index_.store(index + 1, std::memory_order_relaxed); return begin_[index]; } // Pop a number of elements. void PopBackCount(int32_t n) { DCHECK_GE(Size(), static_cast<size_t>(n)); - back_index_.StoreRelaxed(back_index_.LoadRelaxed() - n); + back_index_.store(back_index_.load(std::memory_order_relaxed) - n, std::memory_order_relaxed); } bool IsEmpty() const { @@ -170,15 +171,17 @@ class AtomicStack { } size_t Size() const { - DCHECK_LE(front_index_.LoadRelaxed(), back_index_.LoadRelaxed()); - return back_index_.LoadRelaxed() - front_index_.LoadRelaxed(); + DCHECK_LE(front_index_.load(std::memory_order_relaxed), + back_index_.load(std::memory_order_relaxed)); + return + back_index_.load(std::memory_order_relaxed) - front_index_.load(std::memory_order_relaxed); } StackReference<T>* Begin() const { - return begin_ + front_index_.LoadRelaxed(); + return begin_ + front_index_.load(std::memory_order_relaxed); } StackReference<T>* End() const { - return begin_ + back_index_.LoadRelaxed(); + return begin_ + back_index_.load(std::memory_order_relaxed); } size_t Capacity() const { @@ -193,11 +196,11 @@ class AtomicStack { } void Sort() { - int32_t start_back_index = back_index_.LoadRelaxed(); - int32_t start_front_index = front_index_.LoadRelaxed(); + int32_t start_back_index = back_index_.load(std::memory_order_relaxed); + int32_t start_front_index = front_index_.load(std::memory_order_relaxed); std::sort(Begin(), End(), ObjectComparator()); - CHECK_EQ(start_back_index, back_index_.LoadRelaxed()); - CHECK_EQ(start_front_index, front_index_.LoadRelaxed()); + CHECK_EQ(start_back_index, back_index_.load(std::memory_order_relaxed)); + CHECK_EQ(start_front_index, front_index_.load(std::memory_order_relaxed)); if (kIsDebugBuild) { debug_is_sorted_ = true; } @@ -236,7 +239,7 @@ class AtomicStack { } int32_t index; do { - index = back_index_.LoadRelaxed(); + index = back_index_.load(std::memory_order_relaxed); if (UNLIKELY(static_cast<size_t>(index) >= limit)) { // Stack overflow. return false; diff --git a/runtime/gc/accounting/bitmap-inl.h b/runtime/gc/accounting/bitmap-inl.h index a71b212af3..a4273e5ff6 100644 --- a/runtime/gc/accounting/bitmap-inl.h +++ b/runtime/gc/accounting/bitmap-inl.h @@ -37,7 +37,7 @@ inline bool Bitmap::AtomicTestAndSetBit(uintptr_t bit_index) { auto* atomic_entry = reinterpret_cast<Atomic<uintptr_t>*>(&bitmap_begin_[word_index]); uintptr_t old_word; do { - old_word = atomic_entry->LoadRelaxed(); + old_word = atomic_entry->load(std::memory_order_relaxed); // Fast path: The bit is already set. 
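// [Editor's note] AtomicTestAndSetBit is the textbook CAS loop: reload the
// word, return early when the bit is already set, otherwise publish
// word|mask. A hedged sketch; std::atomic::fetch_or could collapse the loop,
// but the early return avoids issuing any store on an already-set bit
// (which matters for the read-only image bitmaps discussed further down):
#include <atomic>
#include <cstdint>

// Returns the previous value of the bit.
bool AtomicTestAndSetSketch(std::atomic<uintptr_t>* word, uintptr_t mask) {
  uintptr_t old_word;
  do {
    old_word = word->load(std::memory_order_relaxed);
    if ((old_word & mask) != 0) {
      return true;  // fast path: already set, no store issued
    }
  } while (!word->compare_exchange_weak(old_word, old_word | mask,
                                        std::memory_order_relaxed));
  return false;
}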
if ((old_word & word_mask) != 0) { DCHECK(TestBit(bit_index)); diff --git a/runtime/gc/accounting/card_table-inl.h b/runtime/gc/accounting/card_table-inl.h index 14f5d0e1c6..d9c0418f4a 100644 --- a/runtime/gc/accounting/card_table-inl.h +++ b/runtime/gc/accounting/card_table-inl.h @@ -43,7 +43,7 @@ static inline bool byte_cas(uint8_t old_value, uint8_t new_value, uint8_t* addre Atomic<uintptr_t>* word_atomic = reinterpret_cast<Atomic<uintptr_t>*>(address); // Word with the byte we are trying to cas cleared. - const uintptr_t cur_word = word_atomic->LoadRelaxed() & + const uintptr_t cur_word = word_atomic->load(std::memory_order_relaxed) & ~(static_cast<uintptr_t>(0xFF) << shift_in_bits); const uintptr_t old_word = cur_word | (static_cast<uintptr_t>(old_value) << shift_in_bits); const uintptr_t new_word = cur_word | (static_cast<uintptr_t>(new_value) << shift_in_bits); diff --git a/runtime/gc/accounting/space_bitmap-inl.h b/runtime/gc/accounting/space_bitmap-inl.h index 384e3c2f4c..d460e00075 100644 --- a/runtime/gc/accounting/space_bitmap-inl.h +++ b/runtime/gc/accounting/space_bitmap-inl.h @@ -41,7 +41,7 @@ inline bool SpaceBitmap<kAlignment>::AtomicTestAndSet(const mirror::Object* obj) DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_; uintptr_t old_word; do { - old_word = atomic_entry->LoadRelaxed(); + old_word = atomic_entry->load(std::memory_order_relaxed); // Fast path: The bit is already set. if ((old_word & mask) != 0) { DCHECK(Test(obj)); @@ -59,7 +59,8 @@ inline bool SpaceBitmap<kAlignment>::Test(const mirror::Object* obj) const { DCHECK(bitmap_begin_ != nullptr); DCHECK_GE(addr, heap_begin_); const uintptr_t offset = addr - heap_begin_; - return (bitmap_begin_[OffsetToIndex(offset)].LoadRelaxed() & OffsetToMask(offset)) != 0; + size_t index = OffsetToIndex(offset); + return (bitmap_begin_[index].load(std::memory_order_relaxed) & OffsetToMask(offset)) != 0; } template<size_t kAlignment> @@ -119,7 +120,7 @@ inline void SpaceBitmap<kAlignment>::VisitMarkedRange(uintptr_t visit_begin, // Traverse the middle, full part. for (size_t i = index_start + 1; i < index_end; ++i) { - uintptr_t w = bitmap_begin_[i].LoadRelaxed(); + uintptr_t w = bitmap_begin_[i].load(std::memory_order_relaxed); if (w != 0) { const uintptr_t ptr_base = IndexToOffset(i) + heap_begin_; // Iterate on the bits set in word `w`, from the least to the most significant bit. @@ -168,7 +169,7 @@ void SpaceBitmap<kAlignment>::Walk(Visitor&& visitor) { uintptr_t end = OffsetToIndex(HeapLimit() - heap_begin_ - 1); Atomic<uintptr_t>* bitmap_begin = bitmap_begin_; for (uintptr_t i = 0; i <= end; ++i) { - uintptr_t w = bitmap_begin[i].LoadRelaxed(); + uintptr_t w = bitmap_begin[i].load(std::memory_order_relaxed); if (w != 0) { uintptr_t ptr_base = IndexToOffset(i) + heap_begin_; do { @@ -192,7 +193,7 @@ inline bool SpaceBitmap<kAlignment>::Modify(const mirror::Object* obj) { const uintptr_t mask = OffsetToMask(offset); DCHECK_LT(index, bitmap_size_ / sizeof(intptr_t)) << " bitmap_size_ = " << bitmap_size_; Atomic<uintptr_t>* atomic_entry = &bitmap_begin_[index]; - uintptr_t old_word = atomic_entry->LoadRelaxed(); + uintptr_t old_word = atomic_entry->load(std::memory_order_relaxed); if (kSetBit) { // Check the bit before setting the word in case we are trying to mark a read only bitmap // like an image space bitmap.
This bitmap is mapped as read only and will fault if we @@ -200,10 +201,10 @@ inline bool SpaceBitmap<kAlignment>::Modify(const mirror::Object* obj) { // occur if we check before setting the bit. This also prevents dirty pages that would // occur if the bitmap was read write and we did not check the bit. if ((old_word & mask) == 0) { - atomic_entry->StoreRelaxed(old_word | mask); + atomic_entry->store(old_word | mask, std::memory_order_relaxed); } } else { - atomic_entry->StoreRelaxed(old_word & ~mask); + atomic_entry->store(old_word & ~mask, std::memory_order_relaxed); } DCHECK_EQ(Test(obj), kSetBit); return (old_word & mask) != 0; diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc index 0247564a8c..d84288f676 100644 --- a/runtime/gc/accounting/space_bitmap.cc +++ b/runtime/gc/accounting/space_bitmap.cc @@ -145,7 +145,7 @@ void SpaceBitmap<kAlignment>::CopyFrom(SpaceBitmap* source_bitmap) { Atomic<uintptr_t>* const src = source_bitmap->Begin(); Atomic<uintptr_t>* const dest = Begin(); for (size_t i = 0; i < count; ++i) { - dest[i].StoreRelaxed(src[i].LoadRelaxed()); + dest[i].store(src[i].load(std::memory_order_relaxed), std::memory_order_relaxed); } } @@ -184,7 +184,8 @@ void SpaceBitmap<kAlignment>::SweepWalk(const SpaceBitmap<kAlignment>& live_bitm Atomic<uintptr_t>* live = live_bitmap.bitmap_begin_; Atomic<uintptr_t>* mark = mark_bitmap.bitmap_begin_; for (size_t i = start; i <= end; i++) { - uintptr_t garbage = live[i].LoadRelaxed() & ~mark[i].LoadRelaxed(); + uintptr_t garbage = + live[i].load(std::memory_order_relaxed) & ~mark[i].load(std::memory_order_relaxed); if (UNLIKELY(garbage != 0)) { uintptr_t ptr_base = IndexToOffset(i) + live_bitmap.heap_begin_; do { diff --git a/runtime/gc/collector/concurrent_copying-inl.h b/runtime/gc/collector/concurrent_copying-inl.h index 56983be8fa..6e345fb2f2 100644 --- a/runtime/gc/collector/concurrent_copying-inl.h +++ b/runtime/gc/collector/concurrent_copying-inl.h @@ -78,13 +78,13 @@ inline mirror::Object* ConcurrentCopying::MarkImmuneSpace(mirror::Object* ref) { if (kIsDebugBuild) { if (Thread::Current() == thread_running_gc_) { DCHECK(!kGrayImmuneObject || - updated_all_immune_objects_.LoadRelaxed() || + updated_all_immune_objects_.load(std::memory_order_relaxed) || gc_grays_immune_objects_); } else { DCHECK(kGrayImmuneObject); } } - if (!kGrayImmuneObject || updated_all_immune_objects_.LoadRelaxed()) { + if (!kGrayImmuneObject || updated_all_immune_objects_.load(std::memory_order_relaxed)) { return ref; } // This may or may not succeed, which is ok because the object may already be gray. 
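// [Editor's note] updated_all_immune_objects_ shows the two roles this flag
// plays: fast-path filters read it relaxed (a stale false only costs a
// slow-path trip, never skips required work), while the one store that must
// publish preceding field updates uses release ordering (see the
// MarkingPhase hunk in concurrent_copying.cc below). Hedged sketch of the
// pairing:
#include <atomic>

std::atomic<bool> updated_all_immune_objects{false};

bool FastPathSketch() {
  // Relaxed: correctness does not depend on seeing the latest value here.
  return updated_all_immune_objects.load(std::memory_order_relaxed);
}

void PublishSketch() {
  // Release: an acquire reader that sees `true` also sees the field updates
  // performed before this store.
  updated_all_immune_objects.store(true, std::memory_order_release);
}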
diff --git a/runtime/gc/collector/concurrent_copying.cc b/runtime/gc/collector/concurrent_copying.cc index b10c504dd5..bb5167f15d 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -291,14 +291,14 @@ void ConcurrentCopying::InitializePhase() { rb_mark_bit_stack_full_ = false; mark_from_read_barrier_measurements_ = measure_read_barrier_slow_path_; if (measure_read_barrier_slow_path_) { - rb_slow_path_ns_.StoreRelaxed(0); - rb_slow_path_count_.StoreRelaxed(0); - rb_slow_path_count_gc_.StoreRelaxed(0); + rb_slow_path_ns_.store(0, std::memory_order_relaxed); + rb_slow_path_count_.store(0, std::memory_order_relaxed); + rb_slow_path_count_gc_.store(0, std::memory_order_relaxed); } immune_spaces_.Reset(); - bytes_moved_.StoreRelaxed(0); - objects_moved_.StoreRelaxed(0); + bytes_moved_.store(0, std::memory_order_relaxed); + objects_moved_.store(0, std::memory_order_relaxed); GcCause gc_cause = GetCurrentIteration()->GetGcCause(); if (gc_cause == kGcCauseExplicit || gc_cause == kGcCauseCollectorTransition || @@ -308,7 +308,7 @@ void ConcurrentCopying::InitializePhase() { force_evacuate_all_ = false; } if (kUseBakerReadBarrier) { - updated_all_immune_objects_.StoreRelaxed(false); + updated_all_immune_objects_.store(false, std::memory_order_relaxed); // GC may gray immune objects in the thread flip. gc_grays_immune_objects_ = true; if (kIsDebugBuild) { @@ -350,7 +350,7 @@ class ConcurrentCopying::ThreadFlipVisitor : public Closure, public RootVisitor concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread); reinterpret_cast<Atomic<size_t>*>( &concurrent_copying_->from_space_num_objects_at_first_pause_)-> - FetchAndAddSequentiallyConsistent(thread_local_objects); + fetch_add(thread_local_objects, std::memory_order_seq_cst); } else { concurrent_copying_->region_space_->RevokeThreadLocalBuffers(thread); } @@ -430,7 +430,8 @@ class ConcurrentCopying::FlipCallback : public Closure { cc->from_space_num_bytes_at_first_pause_ = cc->region_space_->GetBytesAllocated(); } cc->is_marking_ = true; - cc->mark_stack_mode_.StoreRelaxed(ConcurrentCopying::kMarkStackModeThreadLocal); + cc->mark_stack_mode_.store(ConcurrentCopying::kMarkStackModeThreadLocal, + std::memory_order_relaxed); if (kIsDebugBuild) { cc->region_space_->AssertAllRegionLiveBytesZeroOrCleared(); } @@ -728,7 +729,7 @@ void ConcurrentCopying::GrayAllNewlyDirtyImmuneObjects() { } // Since all of the objects that may point to other spaces are gray, we can avoid all the read // barriers in the immune spaces. - updated_all_immune_objects_.StoreRelaxed(true); + updated_all_immune_objects_.store(true, std::memory_order_relaxed); } void ConcurrentCopying::SwapStacks() { @@ -816,7 +817,7 @@ void ConcurrentCopying::MarkingPhase() { if (kUseBakerReadBarrier) { // This release fence makes the field updates in the above loop visible before allowing mutator // getting access to immune objects without graying it first. - updated_all_immune_objects_.StoreRelease(true); + updated_all_immune_objects_.store(true, std::memory_order_release); // Now whiten immune objects concurrently accessed and grayed by mutators. We can't do this in // the above loop because we would incorrectly disable the read barrier by whitening an object // which may point to an unscanned, white object, breaking the to-space invariant. 
@@ -1018,8 +1019,8 @@ void ConcurrentCopying::DisableMarking() { heap_->rb_table_->ClearAll(); DCHECK(heap_->rb_table_->IsAllCleared()); } - is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(1); - mark_stack_mode_.StoreSequentiallyConsistent(kMarkStackModeOff); + is_mark_stack_push_disallowed_.store(1, std::memory_order_seq_cst); + mark_stack_mode_.store(kMarkStackModeOff, std::memory_order_seq_cst); } void ConcurrentCopying::PushOntoFalseGrayStack(mirror::Object* ref) { @@ -1069,11 +1070,11 @@ void ConcurrentCopying::ExpandGcMarkStack() { } void ConcurrentCopying::PushOntoMarkStack(mirror::Object* to_ref) { - CHECK_EQ(is_mark_stack_push_disallowed_.LoadRelaxed(), 0) + CHECK_EQ(is_mark_stack_push_disallowed_.load(std::memory_order_relaxed), 0) << " " << to_ref << " " << mirror::Object::PrettyTypeOf(to_ref); Thread* self = Thread::Current(); // TODO: pass self as an argument from call sites? CHECK(thread_running_gc_ != nullptr); - MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed(); + MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed); if (LIKELY(mark_stack_mode == kMarkStackModeThreadLocal)) { if (LIKELY(self == thread_running_gc_)) { // If GC-running thread, use the GC mark stack instead of a thread-local mark stack. @@ -1412,7 +1413,7 @@ bool ConcurrentCopying::ProcessMarkStackOnce() { CHECK(self == thread_running_gc_); CHECK(self->GetThreadLocalMarkStack() == nullptr); size_t count = 0; - MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed(); + MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed); if (mark_stack_mode == kMarkStackModeThreadLocal) { // Process the thread-local mark stacks and the GC mark stack. count += ProcessThreadLocalMarkStacks(/* disable_weak_ref_access */ false, @@ -1597,10 +1598,10 @@ void ConcurrentCopying::SwitchToSharedMarkStackMode() { CHECK(thread_running_gc_ != nullptr); CHECK_EQ(self, thread_running_gc_); CHECK(self->GetThreadLocalMarkStack() == nullptr); - MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed(); + MarkStackMode before_mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed); CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode), static_cast<uint32_t>(kMarkStackModeThreadLocal)); - mark_stack_mode_.StoreRelaxed(kMarkStackModeShared); + mark_stack_mode_.store(kMarkStackModeShared, std::memory_order_relaxed); DisableWeakRefAccessCallback dwrac(this); // Process the thread local mark stacks one last time after switching to the shared mark stack // mode and disable weak ref accesses. 
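// [Editor's note] mark_stack_mode_ is a small state machine
// (ThreadLocal -> Shared -> GcExclusive -> Off) that only the GC-running
// thread advances; the stores stay relaxed because each transition is
// published to mutators via a checkpoint or an explicit fence (see the
// SwitchToGcExclusiveMarkStackMode hunk below). A hedged sketch, with the
// enum values assumed from the surrounding code:
#include <atomic>
#include <cstdint>

enum MarkStackMode : uint32_t {
  kMarkStackModeOff = 0,
  kMarkStackModeThreadLocal,
  kMarkStackModeShared,
  kMarkStackModeGcExclusive,
};

std::atomic<MarkStackMode> mark_stack_mode{kMarkStackModeThreadLocal};

void SwitchToSharedSketch() {
  // The real code CHECKs the prior mode first; only the GC thread stores.
  mark_stack_mode.store(kMarkStackModeShared, std::memory_order_relaxed);
  // cf. QuasiAtomic::ThreadFenceForConstructor() after the GcExclusive switch.
  std::atomic_thread_fence(std::memory_order_seq_cst);
}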
@@ -1615,10 +1616,10 @@ void ConcurrentCopying::SwitchToGcExclusiveMarkStackMode() { CHECK(thread_running_gc_ != nullptr); CHECK_EQ(self, thread_running_gc_); CHECK(self->GetThreadLocalMarkStack() == nullptr); - MarkStackMode before_mark_stack_mode = mark_stack_mode_.LoadRelaxed(); + MarkStackMode before_mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed); CHECK_EQ(static_cast<uint32_t>(before_mark_stack_mode), static_cast<uint32_t>(kMarkStackModeShared)); - mark_stack_mode_.StoreRelaxed(kMarkStackModeGcExclusive); + mark_stack_mode_.store(kMarkStackModeGcExclusive, std::memory_order_relaxed); QuasiAtomic::ThreadFenceForConstructor(); if (kVerboseMode) { LOG(INFO) << "Switched to GC exclusive mark stack mode"; @@ -1630,7 +1631,7 @@ void ConcurrentCopying::CheckEmptyMarkStack() { CHECK(thread_running_gc_ != nullptr); CHECK_EQ(self, thread_running_gc_); CHECK(self->GetThreadLocalMarkStack() == nullptr); - MarkStackMode mark_stack_mode = mark_stack_mode_.LoadRelaxed(); + MarkStackMode mark_stack_mode = mark_stack_mode_.load(std::memory_order_relaxed); if (mark_stack_mode == kMarkStackModeThreadLocal) { // Thread-local mark stack mode. RevokeThreadLocalMarkStacks(false, nullptr); @@ -1738,9 +1739,9 @@ void ConcurrentCopying::ReclaimPhase() { } IssueEmptyCheckpoint(); // Disable the check. - is_mark_stack_push_disallowed_.StoreSequentiallyConsistent(0); + is_mark_stack_push_disallowed_.store(0, std::memory_order_seq_cst); if (kUseBakerReadBarrier) { - updated_all_immune_objects_.StoreSequentiallyConsistent(false); + updated_all_immune_objects_.store(false, std::memory_order_seq_cst); } CheckEmptyMarkStack(); } @@ -1753,10 +1754,10 @@ void ConcurrentCopying::ReclaimPhase() { const uint64_t from_objects = region_space_->GetObjectsAllocatedInFromSpace(); const uint64_t unevac_from_bytes = region_space_->GetBytesAllocatedInUnevacFromSpace(); const uint64_t unevac_from_objects = region_space_->GetObjectsAllocatedInUnevacFromSpace(); - uint64_t to_bytes = bytes_moved_.LoadSequentiallyConsistent(); - cumulative_bytes_moved_.FetchAndAddRelaxed(to_bytes); - uint64_t to_objects = objects_moved_.LoadSequentiallyConsistent(); - cumulative_objects_moved_.FetchAndAddRelaxed(to_objects); + uint64_t to_bytes = bytes_moved_.load(std::memory_order_seq_cst); + cumulative_bytes_moved_.fetch_add(to_bytes, std::memory_order_relaxed); + uint64_t to_objects = objects_moved_.load(std::memory_order_seq_cst); + cumulative_objects_moved_.fetch_add(to_objects, std::memory_order_relaxed); if (kEnableFromSpaceAccountingCheck) { CHECK_EQ(from_space_num_objects_at_first_pause_, from_objects + unevac_from_objects); CHECK_EQ(from_space_num_bytes_at_first_pause_, from_bytes + unevac_from_bytes); @@ -1787,12 +1788,12 @@ void ConcurrentCopying::ReclaimPhase() { << " unevac_from_space size=" << region_space_->UnevacFromSpaceSize() << " to_space size=" << region_space_->ToSpaceSize(); LOG(INFO) << "(before) num_bytes_allocated=" - << heap_->num_bytes_allocated_.LoadSequentiallyConsistent(); + << heap_->num_bytes_allocated_.load(std::memory_order_seq_cst); } RecordFree(ObjectBytePair(freed_objects, freed_bytes)); if (kVerboseMode) { LOG(INFO) << "(after) num_bytes_allocated=" - << heap_->num_bytes_allocated_.LoadSequentiallyConsistent(); + << heap_->num_bytes_allocated_.load(std::memory_order_seq_cst); } } @@ -2042,7 +2043,7 @@ void ConcurrentCopying::AssertToSpaceInvariantInNonMovingSpace(mirror::Object* o if (Thread::Current() == thread_running_gc_ && !gc_grays_immune_objects_) { return; } - bool 
updated_all_immune_objects = updated_all_immune_objects_.LoadSequentiallyConsistent(); + bool updated_all_immune_objects = updated_all_immune_objects_.load(std::memory_order_seq_cst); CHECK(updated_all_immune_objects || ref->GetReadBarrierState() == ReadBarrier::GrayState()) << "Unmarked immune space ref. obj=" << obj << " rb_state=" << (obj != nullptr ? obj->GetReadBarrierState() : 0U) @@ -2165,7 +2166,7 @@ inline void ConcurrentCopying::VisitRoots( mirror::Object* expected_ref = ref; mirror::Object* new_ref = to_ref; do { - if (expected_ref != addr->LoadRelaxed()) { + if (expected_ref != addr->load(std::memory_order_relaxed)) { // It was updated by the mutator. break; } @@ -2184,7 +2185,7 @@ inline void ConcurrentCopying::MarkRoot(mirror::CompressedReference<mirror::Obje auto new_ref = mirror::CompressedReference<mirror::Object>::FromMirrorPtr(to_ref); // If the cas fails, then it was updated by the mutator. do { - if (ref != addr->LoadRelaxed().AsMirrorPtr()) { + if (ref != addr->load(std::memory_order_relaxed).AsMirrorPtr()) { // It was updated by the mutator. break; } @@ -2378,8 +2379,9 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref, fall_back_to_non_moving = true; if (kVerboseMode) { LOG(INFO) << "Out of memory in the to-space. Fall back to non-moving. skipped_bytes=" - << to_space_bytes_skipped_.LoadSequentiallyConsistent() - << " skipped_objects=" << to_space_objects_skipped_.LoadSequentiallyConsistent(); + << to_space_bytes_skipped_.load(std::memory_order_seq_cst) + << " skipped_objects=" + << to_space_objects_skipped_.load(std::memory_order_seq_cst); } fall_back_to_non_moving = true; to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size, @@ -2431,9 +2433,9 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref, region_space_->FreeLarge</*kForEvac*/ true>(to_ref, bytes_allocated); } else { // Record the lost copy for later reuse. - heap_->num_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes_allocated); - to_space_bytes_skipped_.FetchAndAddSequentiallyConsistent(bytes_allocated); - to_space_objects_skipped_.FetchAndAddSequentiallyConsistent(1); + heap_->num_bytes_allocated_.fetch_add(bytes_allocated, std::memory_order_seq_cst); + to_space_bytes_skipped_.fetch_add(bytes_allocated, std::memory_order_seq_cst); + to_space_objects_skipped_.fetch_add(1, std::memory_order_seq_cst); MutexLock mu(Thread::Current(), skipped_blocks_lock_); skipped_blocks_map_.insert(std::make_pair(bytes_allocated, reinterpret_cast<uint8_t*>(to_ref))); @@ -2477,8 +2479,8 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref, bool success = from_ref->CasLockWordWeakRelaxed(old_lock_word, new_lock_word); if (LIKELY(success)) { // The CAS succeeded. 
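// [Editor's note] Because the lock-word CAS above has exactly one winner per
// object, the statistics updated afterwards need atomicity but no ordering:
// plain relaxed fetch_adds suffice, and the totals are only read once the
// phase is over. Hedged sketch of the counter side:
#include <atomic>
#include <cstddef>
#include <cstdint>

std::atomic<uint64_t> objects_moved{0};
std::atomic<uint64_t> bytes_moved{0};

void RecordMoveSketch(size_t alloc_size) {
  objects_moved.fetch_add(1, std::memory_order_relaxed);
  bytes_moved.fetch_add(alloc_size, std::memory_order_relaxed);
}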
- objects_moved_.FetchAndAddRelaxed(1); - bytes_moved_.FetchAndAddRelaxed(region_space_alloc_size); + objects_moved_.fetch_add(1, std::memory_order_relaxed); + bytes_moved_.fetch_add(region_space_alloc_size, std::memory_order_relaxed); if (LIKELY(!fall_back_to_non_moving)) { DCHECK(region_space_->IsInToSpace(to_ref)); } else { @@ -2704,9 +2706,10 @@ void ConcurrentCopying::FinishPhase() { } if (measure_read_barrier_slow_path_) { MutexLock mu(self, rb_slow_path_histogram_lock_); - rb_slow_path_time_histogram_.AdjustAndAddValue(rb_slow_path_ns_.LoadRelaxed()); - rb_slow_path_count_total_ += rb_slow_path_count_.LoadRelaxed(); - rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.LoadRelaxed(); + rb_slow_path_time_histogram_.AdjustAndAddValue( + rb_slow_path_ns_.load(std::memory_order_relaxed)); + rb_slow_path_count_total_ += rb_slow_path_count_.load(std::memory_order_relaxed); + rb_slow_path_count_gc_total_ += rb_slow_path_count_gc_.load(std::memory_order_relaxed); } } @@ -2760,15 +2763,15 @@ void ConcurrentCopying::RevokeAllThreadLocalBuffers() { mirror::Object* ConcurrentCopying::MarkFromReadBarrierWithMeasurements(mirror::Object* from_ref) { if (Thread::Current() != thread_running_gc_) { - rb_slow_path_count_.FetchAndAddRelaxed(1u); + rb_slow_path_count_.fetch_add(1u, std::memory_order_relaxed); } else { - rb_slow_path_count_gc_.FetchAndAddRelaxed(1u); + rb_slow_path_count_gc_.fetch_add(1u, std::memory_order_relaxed); } ScopedTrace tr(__FUNCTION__); const uint64_t start_time = measure_read_barrier_slow_path_ ? NanoTime() : 0u; mirror::Object* ret = Mark(from_ref); if (measure_read_barrier_slow_path_) { - rb_slow_path_ns_.FetchAndAddRelaxed(NanoTime() - start_time); + rb_slow_path_ns_.fetch_add(NanoTime() - start_time, std::memory_order_relaxed); } return ret; } @@ -2787,8 +2790,10 @@ void ConcurrentCopying::DumpPerformanceInfo(std::ostream& os) { if (rb_slow_path_count_gc_total_ > 0) { os << "GC slow path count " << rb_slow_path_count_gc_total_ << "\n"; } - os << "Cumulative bytes moved " << cumulative_bytes_moved_.LoadRelaxed() << "\n"; - os << "Cumulative objects moved " << cumulative_objects_moved_.LoadRelaxed() << "\n"; + os << "Cumulative bytes moved " + << cumulative_bytes_moved_.load(std::memory_order_relaxed) << "\n"; + os << "Cumulative objects moved " + << cumulative_objects_moved_.load(std::memory_order_relaxed) << "\n"; os << "Peak regions allocated " << region_space_->GetMaxPeakNumNonFreeRegions() << " (" diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index 9ab965ec78..23359640fe 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -116,21 +116,21 @@ void MarkSweep::InitializePhase() { mark_stack_ = heap_->GetMarkStack(); DCHECK(mark_stack_ != nullptr); immune_spaces_.Reset(); - no_reference_class_count_.StoreRelaxed(0); - normal_count_.StoreRelaxed(0); - class_count_.StoreRelaxed(0); - object_array_count_.StoreRelaxed(0); - other_count_.StoreRelaxed(0); - reference_count_.StoreRelaxed(0); - large_object_test_.StoreRelaxed(0); - large_object_mark_.StoreRelaxed(0); - overhead_time_ .StoreRelaxed(0); - work_chunks_created_.StoreRelaxed(0); - work_chunks_deleted_.StoreRelaxed(0); - mark_null_count_.StoreRelaxed(0); - mark_immune_count_.StoreRelaxed(0); - mark_fastpath_count_.StoreRelaxed(0); - mark_slowpath_count_.StoreRelaxed(0); + no_reference_class_count_.store(0, std::memory_order_relaxed); + normal_count_.store(0, std::memory_order_relaxed); + class_count_.store(0, std::memory_order_relaxed); + 
object_array_count_.store(0, std::memory_order_relaxed); + other_count_.store(0, std::memory_order_relaxed); + reference_count_.store(0, std::memory_order_relaxed); + large_object_test_.store(0, std::memory_order_relaxed); + large_object_mark_.store(0, std::memory_order_relaxed); + overhead_time_ .store(0, std::memory_order_relaxed); + work_chunks_created_.store(0, std::memory_order_relaxed); + work_chunks_deleted_.store(0, std::memory_order_relaxed); + mark_null_count_.store(0, std::memory_order_relaxed); + mark_immune_count_.store(0, std::memory_order_relaxed); + mark_fastpath_count_.store(0, std::memory_order_relaxed); + mark_slowpath_count_.store(0, std::memory_order_relaxed); { // TODO: I don't think we should need heap bitmap lock to Get the mark bitmap. ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); @@ -724,7 +724,7 @@ class MarkSweep::MarkStackTask : public Task { if (kUseFinger) { std::atomic_thread_fence(std::memory_order_seq_cst); if (reinterpret_cast<uintptr_t>(ref) >= - static_cast<uintptr_t>(mark_sweep_->atomic_finger_.LoadRelaxed())) { + static_cast<uintptr_t>(mark_sweep_->atomic_finger_.load(std::memory_order_relaxed))) { return; } } @@ -1046,7 +1046,7 @@ void MarkSweep::RecursiveMark() { // This function does not handle heap end increasing, so we must use the space end. uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin()); uintptr_t end = reinterpret_cast<uintptr_t>(space->End()); - atomic_finger_.StoreRelaxed(AtomicInteger::MaxValue()); + atomic_finger_.store(AtomicInteger::MaxValue(), std::memory_order_relaxed); // Create a few worker tasks. const size_t n = thread_count * 2; @@ -1405,8 +1405,8 @@ void MarkSweep::ProcessMarkStackParallel(size_t thread_count) { thread_pool->Wait(self, true, true); thread_pool->StopWorkers(self); mark_stack_->Reset(); - CHECK_EQ(work_chunks_created_.LoadSequentiallyConsistent(), - work_chunks_deleted_.LoadSequentiallyConsistent()) + CHECK_EQ(work_chunks_created_.load(std::memory_order_seq_cst), + work_chunks_deleted_.load(std::memory_order_seq_cst)) << " some of the work chunks were leaked"; } @@ -1462,28 +1462,32 @@ void MarkSweep::FinishPhase() { if (kCountScannedTypes) { VLOG(gc) << "MarkSweep scanned" - << " no reference objects=" << no_reference_class_count_.LoadRelaxed() - << " normal objects=" << normal_count_.LoadRelaxed() - << " classes=" << class_count_.LoadRelaxed() - << " object arrays=" << object_array_count_.LoadRelaxed() - << " references=" << reference_count_.LoadRelaxed() - << " other=" << other_count_.LoadRelaxed(); + << " no reference objects=" << no_reference_class_count_.load(std::memory_order_relaxed) + << " normal objects=" << normal_count_.load(std::memory_order_relaxed) + << " classes=" << class_count_.load(std::memory_order_relaxed) + << " object arrays=" << object_array_count_.load(std::memory_order_relaxed) + << " references=" << reference_count_.load(std::memory_order_relaxed) + << " other=" << other_count_.load(std::memory_order_relaxed); } if (kCountTasks) { - VLOG(gc) << "Total number of work chunks allocated: " << work_chunks_created_.LoadRelaxed(); + VLOG(gc) + << "Total number of work chunks allocated: " + << work_chunks_created_.load(std::memory_order_relaxed); } if (kMeasureOverhead) { - VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.LoadRelaxed()); + VLOG(gc) << "Overhead time " << PrettyDuration(overhead_time_.load(std::memory_order_relaxed)); } if (kProfileLargeObjects) { - VLOG(gc) << "Large objects tested " << large_object_test_.LoadRelaxed() - << " marked 
" << large_object_mark_.LoadRelaxed(); + VLOG(gc) + << "Large objects tested " << large_object_test_.load(std::memory_order_relaxed) + << " marked " << large_object_mark_.load(std::memory_order_relaxed); } if (kCountMarkedObjects) { - VLOG(gc) << "Marked: null=" << mark_null_count_.LoadRelaxed() - << " immune=" << mark_immune_count_.LoadRelaxed() - << " fastpath=" << mark_fastpath_count_.LoadRelaxed() - << " slowpath=" << mark_slowpath_count_.LoadRelaxed(); + VLOG(gc) + << "Marked: null=" << mark_null_count_.load(std::memory_order_relaxed) + << " immune=" << mark_immune_count_.load(std::memory_order_relaxed) + << " fastpath=" << mark_fastpath_count_.load(std::memory_order_relaxed) + << " slowpath=" << mark_slowpath_count_.load(std::memory_order_relaxed); } CHECK(mark_stack_->IsEmpty()); // Ensure that the mark stack is empty. mark_stack_->Reset(); diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h index 41ee18350d..948d23303c 100644 --- a/runtime/gc/heap-inl.h +++ b/runtime/gc/heap-inl.h @@ -156,7 +156,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, pre_fence_visitor(obj, usable_size); QuasiAtomic::ThreadFenceForConstructor(); size_t num_bytes_allocated_before = - num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated); + num_bytes_allocated_.fetch_add(bytes_tl_bulk_allocated, std::memory_order_relaxed); new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated; if (bytes_tl_bulk_allocated > 0) { // Only trace when we get an increase in the number of bytes allocated. This happens when @@ -187,7 +187,7 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self, DCHECK(allocation_records_ != nullptr); allocation_records_->RecordAllocation(self, &obj, bytes_allocated); } - AllocationListener* l = alloc_listener_.LoadSequentiallyConsistent(); + AllocationListener* l = alloc_listener_.load(std::memory_order_seq_cst); if (l != nullptr) { // Same as above. We assume that a listener that was once stored will never be deleted. // Otherwise we'd have to perform this under a lock. @@ -393,7 +393,7 @@ inline bool Heap::ShouldAllocLargeObject(ObjPtr<mirror::Class> c, size_t byte_co inline bool Heap::IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size, bool grow) { - size_t new_footprint = num_bytes_allocated_.LoadSequentiallyConsistent() + alloc_size; + size_t new_footprint = num_bytes_allocated_.load(std::memory_order_seq_cst) + alloc_size; if (UNLIKELY(new_footprint > max_allowed_footprint_)) { if (UNLIKELY(new_footprint > growth_limit_)) { return true; diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index a725ec40b6..52afb3850c 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -549,7 +549,7 @@ Heap::Heap(size_t initial_size, AddRememberedSet(non_moving_space_rem_set); } // TODO: Count objects in the image space here? 
- num_bytes_allocated_.StoreRelaxed(0); + num_bytes_allocated_.store(0, std::memory_order_relaxed); mark_stack_.reset(accounting::ObjectStack::Create("mark stack", kDefaultMarkStackSize, kDefaultMarkStackSize)); const size_t alloc_stack_capacity = max_allocation_stack_size_ + kAllocationStackReserveSize; @@ -1053,7 +1053,8 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) { } os << "Registered native bytes allocated: " - << old_native_bytes_allocated_.LoadRelaxed() + new_native_bytes_allocated_.LoadRelaxed() + << (old_native_bytes_allocated_.load(std::memory_order_relaxed) + + new_native_bytes_allocated_.load(std::memory_order_relaxed)) << "\n"; BaseMutex::DumpAll(os); @@ -1120,11 +1121,7 @@ void Heap::DumpBlockingGcCountRateHistogram(std::ostream& os) const { ALWAYS_INLINE static inline AllocationListener* GetAndOverwriteAllocationListener( Atomic<AllocationListener*>* storage, AllocationListener* new_value) { - AllocationListener* old; - do { - old = storage->LoadSequentiallyConsistent(); - } while (!storage->CompareAndSetStrongSequentiallyConsistent(old, new_value)); - return old; + return storage->exchange(new_value); } Heap::~Heap() { @@ -1142,12 +1139,11 @@ Heap::~Heap() { delete thread_flip_lock_; delete pending_task_lock_; delete backtrace_lock_; - if (unique_backtrace_count_.LoadRelaxed() != 0 || seen_backtrace_count_.LoadRelaxed() != 0) { - LOG(INFO) << "gc stress unique=" << unique_backtrace_count_.LoadRelaxed() - << " total=" << seen_backtrace_count_.LoadRelaxed() + - unique_backtrace_count_.LoadRelaxed(); + uint64_t unique_count = unique_backtrace_count_.load(std::memory_order_relaxed); + uint64_t seen_count = seen_backtrace_count_.load(std::memory_order_relaxed); + if (unique_count != 0 || seen_count != 0) { + LOG(INFO) << "gc stress unique=" << unique_count << " total=" << (unique_count + seen_count); } - VLOG(heap) << "Finished ~Heap()"; } @@ -1493,7 +1489,7 @@ void Heap::VerifyObjectBody(ObjPtr<mirror::Object> obj) { } // Ignore early dawn of the universe verifications. - if (UNLIKELY(static_cast<size_t>(num_bytes_allocated_.LoadRelaxed()) < 10 * KB)) { + if (UNLIKELY(num_bytes_allocated_.load(std::memory_order_relaxed) < 10 * KB)) { return; } CHECK_ALIGNED(obj.Ptr(), kObjectAlignment) << "Object isn't aligned"; @@ -1525,9 +1521,10 @@ void Heap::RecordFree(uint64_t freed_objects, int64_t freed_bytes) { // Use signed comparison since freed bytes can be negative when background compaction foreground // transitions occurs. This is caused by the moving objects from a bump pointer space to a // free list backed space typically increasing memory footprint due to padding and binning. - DCHECK_LE(freed_bytes, static_cast<int64_t>(num_bytes_allocated_.LoadRelaxed())); + DCHECK_LE(freed_bytes, + static_cast<int64_t>(num_bytes_allocated_.load(std::memory_order_relaxed))); // Note: This relies on 2s complement for handling negative freed_bytes. - num_bytes_allocated_.FetchAndSubSequentiallyConsistent(static_cast<ssize_t>(freed_bytes)); + num_bytes_allocated_.fetch_sub(static_cast<ssize_t>(freed_bytes)); if (Runtime::Current()->HasStatsEnabled()) { RuntimeStats* thread_stats = Thread::Current()->GetStats(); thread_stats->freed_objects += freed_objects; @@ -1544,10 +1541,10 @@ void Heap::RecordFreeRevoke() { // ahead-of-time, bulk counting of bytes allocated in rosalloc thread-local buffers. // If there's a concurrent revoke, ok to not necessarily reset num_bytes_freed_revoke_ // all the way to zero exactly as the remainder will be subtracted at the next GC. 
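// [Editor's note] A few hunks above, GetAndOverwriteAllocationListener is one
// of the only non-mechanical rewrites in this file: a hand-rolled
// load/compare-and-set loop becomes a single std::atomic::exchange, which
// already returns the previous value in one RMW (seq_cst by default). Hedged
// before/after sketch:
#include <atomic>

struct Listener;  // stand-in for AllocationListener

Listener* SwapSketch(std::atomic<Listener*>& storage, Listener* new_value) {
  // Before:
  //   Listener* old;
  //   do {
  //     old = storage.load(std::memory_order_seq_cst);
  //   } while (!storage.compare_exchange_strong(old, new_value));
  //   return old;
  // After -- same result, one atomic operation:
  return storage.exchange(new_value);
}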
- size_t bytes_freed = num_bytes_freed_revoke_.LoadSequentiallyConsistent(); - CHECK_GE(num_bytes_freed_revoke_.FetchAndSubSequentiallyConsistent(bytes_freed), + size_t bytes_freed = num_bytes_freed_revoke_.load(); + CHECK_GE(num_bytes_freed_revoke_.fetch_sub(bytes_freed), bytes_freed) << "num_bytes_freed_revoke_ underflow"; - CHECK_GE(num_bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes_freed), + CHECK_GE(num_bytes_allocated_.fetch_sub(bytes_freed), bytes_freed) << "num_bytes_allocated_ underflow"; GetCurrentGcIteration()->SetFreedRevoke(bytes_freed); } @@ -1703,13 +1700,13 @@ mirror::Object* Heap::AllocateInternalWithGc(Thread* self, // Always print that we ran homogeneous space compaction since this can cause jank. VLOG(heap) << "Ran heap homogeneous space compaction, " << " requested defragmentation " - << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent() + << count_requested_homogeneous_space_compaction_.load() << " performed defragmentation " - << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent() + << count_performed_homogeneous_space_compaction_.load() << " ignored homogeneous space compaction " - << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent() + << count_ignored_homogeneous_space_compaction_.load() << " delayed count = " - << count_delayed_oom_.LoadSequentiallyConsistent(); + << count_delayed_oom_.load(); } break; } @@ -1972,7 +1969,7 @@ void Heap::TransitionCollector(CollectorType collector_type) { VLOG(heap) << "TransitionCollector: " << static_cast<int>(collector_type_) << " -> " << static_cast<int>(collector_type); uint64_t start_time = NanoTime(); - uint32_t before_allocated = num_bytes_allocated_.LoadSequentiallyConsistent(); + uint32_t before_allocated = num_bytes_allocated_.load(); Runtime* const runtime = Runtime::Current(); Thread* const self = Thread::Current(); ScopedThreadStateChange tsc(self, kWaitingPerformingGc); @@ -2110,7 +2107,7 @@ void Heap::TransitionCollector(CollectorType collector_type) { ScopedObjectAccess soa(self); soa.Vm()->UnloadNativeLibraries(); } - int32_t after_allocated = num_bytes_allocated_.LoadSequentiallyConsistent(); + int32_t after_allocated = num_bytes_allocated_.load(std::memory_order_seq_cst); int32_t delta_allocated = before_allocated - after_allocated; std::string saved_str; if (delta_allocated >= 0) { @@ -2559,7 +2556,9 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, // Move all bytes from new_native_bytes_allocated_ to // old_native_bytes_allocated_ now that GC has been triggered, resetting // new_native_bytes_allocated_ to zero in the process. - old_native_bytes_allocated_.FetchAndAddRelaxed(new_native_bytes_allocated_.ExchangeRelaxed(0)); + old_native_bytes_allocated_.fetch_add( + new_native_bytes_allocated_.exchange(0, std::memory_order_relaxed), + std::memory_order_relaxed); } DCHECK_LT(gc_type, collector::kGcTypeMax); @@ -2759,7 +2758,7 @@ class VerifyReferenceVisitor : public SingleRootVisitor { : heap_(heap), fail_count_(fail_count), verify_referent_(verify_referent) {} size_t GetFailureCount() const { - return fail_count_->LoadSequentiallyConsistent(); + return fail_count_->load(std::memory_order_seq_cst); } void operator()(ObjPtr<mirror::Class> klass ATTRIBUTE_UNUSED, ObjPtr<mirror::Reference> ref) const @@ -2811,7 +2810,7 @@ class VerifyReferenceVisitor : public SingleRootVisitor { // Verify that the reference is live.
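// [Editor's note] In the hunk below, the failure counter doubles as a
// log-once latch: fetch_add returns the previous value, so exactly one
// thread observes 0 and prints the banner, however many threads race in.
// Hedged sketch of the idiom:
#include <atomic>
#include <cstddef>
#include <iostream>

std::atomic<size_t> fail_count{0};

void ReportFailureSketch() {
  if (fail_count.fetch_add(1, std::memory_order_seq_cst) == 0) {
    std::cerr << "heap corruption detected\n";  // first failure only
  }
}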
return true; } - if (fail_count_->FetchAndAddSequentiallyConsistent(1) == 0) { + if (fail_count_->fetch_add(1, std::memory_order_seq_cst) == 0) { // Print the message only on the first failure to prevent spam. LOG(ERROR) << "!!!!!!!!!!!!!!Heap corruption detected!!!!!!!!!!!!!!!!!!!"; } @@ -2924,7 +2923,7 @@ class VerifyObjectVisitor { } size_t GetFailureCount() const { - return fail_count_->LoadSequentiallyConsistent(); + return fail_count_->load(std::memory_order_seq_cst); } private: @@ -3605,7 +3604,7 @@ static bool CanAddHeapTask(Thread* self) REQUIRES(!Locks::runtime_shutdown_lock_ } void Heap::ClearConcurrentGCRequest() { - concurrent_gc_pending_.StoreRelaxed(false); + concurrent_gc_pending_.store(false, std::memory_order_relaxed); } void Heap::RequestConcurrentGC(Thread* self, GcCause cause, bool force_full) { @@ -3732,8 +3731,9 @@ void Heap::RevokeThreadLocalBuffers(Thread* thread) { if (rosalloc_space_ != nullptr) { size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread); if (freed_bytes_revoke > 0U) { - num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke); - CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed()); + num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_seq_cst); + CHECK_GE(num_bytes_allocated_.load(std::memory_order_relaxed), + num_bytes_freed_revoke_.load(std::memory_order_relaxed)); } } if (bump_pointer_space_ != nullptr) { @@ -3748,8 +3748,9 @@ void Heap::RevokeRosAllocThreadLocalBuffers(Thread* thread) { if (rosalloc_space_ != nullptr) { size_t freed_bytes_revoke = rosalloc_space_->RevokeThreadLocalBuffers(thread); if (freed_bytes_revoke > 0U) { - num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke); - CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed()); + num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_seq_cst); + CHECK_GE(num_bytes_allocated_.load(std::memory_order_relaxed), + num_bytes_freed_revoke_.load(std::memory_order_relaxed)); } } } @@ -3758,8 +3759,9 @@ void Heap::RevokeAllThreadLocalBuffers() { if (rosalloc_space_ != nullptr) { size_t freed_bytes_revoke = rosalloc_space_->RevokeAllThreadLocalBuffers(); if (freed_bytes_revoke > 0U) { - num_bytes_freed_revoke_.FetchAndAddSequentiallyConsistent(freed_bytes_revoke); - CHECK_GE(num_bytes_allocated_.LoadRelaxed(), num_bytes_freed_revoke_.LoadRelaxed()); + num_bytes_freed_revoke_.fetch_add(freed_bytes_revoke, std::memory_order_seq_cst); + CHECK_GE(num_bytes_allocated_.load(std::memory_order_relaxed), + num_bytes_freed_revoke_.load(std::memory_order_relaxed)); } } if (bump_pointer_space_ != nullptr) { @@ -3771,7 +3773,7 @@ void Heap::RevokeAllThreadLocalBuffers() { } bool Heap::IsGCRequestPending() const { - return concurrent_gc_pending_.LoadRelaxed(); + return concurrent_gc_pending_.load(std::memory_order_relaxed); } void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) { @@ -3781,7 +3783,7 @@ void Heap::RunFinalization(JNIEnv* env, uint64_t timeout) { } void Heap::RegisterNativeAllocation(JNIEnv* env, size_t bytes) { - size_t old_value = new_native_bytes_allocated_.FetchAndAddRelaxed(bytes); + size_t old_value = new_native_bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed); if (old_value > NativeAllocationGcWatermark() * HeapGrowthMultiplier() && !IsGCRequestPending()) { @@ -3803,12 +3805,12 @@ void Heap::RegisterNativeFree(JNIEnv*, size_t bytes) { size_t allocated; size_t new_freed_bytes; do { - allocated =
new_native_bytes_allocated_.LoadRelaxed(); + allocated = new_native_bytes_allocated_.load(std::memory_order_relaxed); new_freed_bytes = std::min(allocated, bytes); } while (!new_native_bytes_allocated_.CompareAndSetWeakRelaxed(allocated, allocated - new_freed_bytes)); if (new_freed_bytes < bytes) { - old_native_bytes_allocated_.FetchAndSubRelaxed(bytes - new_freed_bytes); + old_native_bytes_allocated_.fetch_sub(bytes - new_freed_bytes, std::memory_order_relaxed); } } @@ -3942,9 +3944,9 @@ void Heap::CheckGcStressMode(Thread* self, ObjPtr<mirror::Object>* obj) { StackHandleScope<1> hs(self); auto h = hs.NewHandleWrapper(obj); CollectGarbage(/* clear_soft_references */ false); - unique_backtrace_count_.FetchAndAddSequentiallyConsistent(1); + unique_backtrace_count_.fetch_add(1, std::memory_order_seq_cst); } else { - seen_backtrace_count_.FetchAndAddSequentiallyConsistent(1); + seen_backtrace_count_.fetch_add(1, std::memory_order_seq_cst); } } } @@ -4020,11 +4022,11 @@ void Heap::RemoveAllocationListener() { } void Heap::SetGcPauseListener(GcPauseListener* l) { - gc_pause_listener_.StoreRelaxed(l); + gc_pause_listener_.store(l, std::memory_order_relaxed); } void Heap::RemoveGcPauseListener() { - gc_pause_listener_.StoreRelaxed(nullptr); + gc_pause_listener_.store(nullptr, std::memory_order_relaxed); } mirror::Object* Heap::AllocWithNewTLAB(Thread* self, diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index 021fe58cf0..9af57d17e5 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -496,7 +496,7 @@ class Heap { // Returns the number of bytes currently allocated. size_t GetBytesAllocated() const { - return num_bytes_allocated_.LoadSequentiallyConsistent(); + return num_bytes_allocated_.load(std::memory_order_seq_cst); } // Returns the number of objects currently allocated. @@ -546,7 +546,7 @@ class Heap { // Returns how much free memory we have until we need to grow the heap to perform an allocation. // Similar to GetFreeMemoryUntilGC. Implements java.lang.Runtime.freeMemory. size_t GetFreeMemory() const { - size_t byte_allocated = num_bytes_allocated_.LoadSequentiallyConsistent(); + size_t byte_allocated = num_bytes_allocated_.load(std::memory_order_seq_cst); size_t total_memory = GetTotalMemory(); // Make sure we don't get a negative number. return total_memory - std::min(total_memory, byte_allocated); @@ -775,11 +775,11 @@ class Heap { // Allocation tracking support // Callers to this function use double-checked locking to ensure safety on allocation_records_ bool IsAllocTrackingEnabled() const { - return alloc_tracking_enabled_.LoadRelaxed(); + return alloc_tracking_enabled_.load(std::memory_order_relaxed); } void SetAllocTrackingEnabled(bool enabled) REQUIRES(Locks::alloc_tracker_lock_) { - alloc_tracking_enabled_.StoreRelaxed(enabled); + alloc_tracking_enabled_.store(enabled, std::memory_order_relaxed); } AllocRecordObjectMap* GetAllocationRecords() const @@ -825,7 +825,7 @@ class Heap { void SetGcPauseListener(GcPauseListener* l); // Get the currently installed gc pause listener, or null. GcPauseListener* GetGcPauseListener() { - return gc_pause_listener_.LoadAcquire(); + return gc_pause_listener_.load(std::memory_order_acquire); } // Remove a gc pause listener. Note: the listener must not be deleted, as for performance // reasons, we assume it stays valid when we read it (so that we don't require a lock). 
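The RegisterNativeFree loop above is a bounded decrement: the weak-CAS loop clamps the subtraction so the counter never goes below zero, even when racing with concurrent RegisterNativeAllocation calls. The idiom in isolation, as a sketch over a plain std::atomic counter (names hypothetical):

#include <algorithm>
#include <atomic>
#include <cstddef>

std::atomic<size_t> native_bytes{0};  // Stand-in for new_native_bytes_allocated_.

// Subtract up to `bytes`, clamping at zero, and return the amount actually
// subtracted. compare_exchange_weak may fail spuriously or because another
// thread changed the counter; either way it reloads `current`, and the loop
// recomputes the clamped target before retrying.
size_t SubtractClamped(size_t bytes) {
  size_t current = native_bytes.load(std::memory_order_relaxed);
  size_t to_free;
  do {
    to_free = std::min(current, bytes);
  } while (!native_bytes.compare_exchange_weak(current,
                                               current - to_free,
                                               std::memory_order_relaxed));
  return to_free;
}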
diff --git a/runtime/gc/space/bump_pointer_space-inl.h b/runtime/gc/space/bump_pointer_space-inl.h index 9ebb131ad1..4c585497ec 100644 --- a/runtime/gc/space/bump_pointer_space-inl.h +++ b/runtime/gc/space/bump_pointer_space-inl.h @@ -46,16 +46,18 @@ inline mirror::Object* BumpPointerSpace::AllocThreadUnsafe(Thread* self, size_t size_t* bytes_tl_bulk_allocated) { Locks::mutator_lock_->AssertExclusiveHeld(self); num_bytes = RoundUp(num_bytes, kAlignment); - uint8_t* end = end_.LoadRelaxed(); + uint8_t* end = end_.load(std::memory_order_relaxed); if (end + num_bytes > growth_end_) { return nullptr; } mirror::Object* obj = reinterpret_cast<mirror::Object*>(end); - end_.StoreRelaxed(end + num_bytes); + end_.store(end + num_bytes, std::memory_order_relaxed); *bytes_allocated = num_bytes; // Use the CAS free versions as an optimization. - objects_allocated_.StoreRelaxed(objects_allocated_.LoadRelaxed() + 1); - bytes_allocated_.StoreRelaxed(bytes_allocated_.LoadRelaxed() + num_bytes); + objects_allocated_.store(objects_allocated_.load(std::memory_order_relaxed) + 1, + std::memory_order_relaxed); + bytes_allocated_.store(bytes_allocated_.load(std::memory_order_relaxed) + num_bytes, + std::memory_order_relaxed); if (UNLIKELY(usable_size != nullptr)) { *usable_size = num_bytes; } @@ -68,7 +70,7 @@ inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t uint8_t* old_end; uint8_t* new_end; do { - old_end = end_.LoadRelaxed(); + old_end = end_.load(std::memory_order_relaxed); new_end = old_end + num_bytes; // If there is no more room in the region, we are out of memory. if (UNLIKELY(new_end > growth_end_)) { @@ -81,8 +83,8 @@ inline mirror::Object* BumpPointerSpace::AllocNonvirtualWithoutAccounting(size_t inline mirror::Object* BumpPointerSpace::AllocNonvirtual(size_t num_bytes) { mirror::Object* ret = AllocNonvirtualWithoutAccounting(num_bytes); if (ret != nullptr) { - objects_allocated_.FetchAndAddSequentiallyConsistent(1); - bytes_allocated_.FetchAndAddSequentiallyConsistent(num_bytes); + objects_allocated_.fetch_add(1, std::memory_order_seq_cst); + bytes_allocated_.fetch_add(num_bytes, std::memory_order_seq_cst); } return ret; } diff --git a/runtime/gc/space/bump_pointer_space.cc b/runtime/gc/space/bump_pointer_space.cc index ce0e0f3630..e95da01d8c 100644 --- a/runtime/gc/space/bump_pointer_space.cc +++ b/runtime/gc/space/bump_pointer_space.cc @@ -72,8 +72,8 @@ void BumpPointerSpace::Clear() { // Reset the end of the space back to the beginning, we move the end forward as we allocate // objects. SetEnd(Begin()); - objects_allocated_.StoreRelaxed(0); - bytes_allocated_.StoreRelaxed(0); + objects_allocated_.store(0, std::memory_order_relaxed); + bytes_allocated_.store(0, std::memory_order_relaxed); growth_end_ = Limit(); { MutexLock mu(Thread::Current(), block_lock_); @@ -160,7 +160,7 @@ accounting::ContinuousSpaceBitmap::SweepCallback* BumpPointerSpace::GetSweepCall uint64_t BumpPointerSpace::GetBytesAllocated() { // Start out pre-determined amount (blocks which are not being allocated into). 
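The AllocNonvirtualWithoutAccounting loop above is the classic lock-free bump-pointer allocation: reserve a range by CAS-ing the end pointer forward, and return the previously observed end as the new object's address. A minimal sketch of the same shape (hypothetical BumpRegion type, plain std::atomic):

#include <atomic>
#include <cstddef>
#include <cstdint>

struct BumpRegion {
  std::atomic<uint8_t*> end;  // The bump pointer, mirrors end_.
  uint8_t* limit;             // Mirrors growth_end_.

  void* Alloc(size_t num_bytes) {
    uint8_t* old_end = end.load(std::memory_order_relaxed);
    uint8_t* new_end;
    do {
      new_end = old_end + num_bytes;
      if (new_end > limit) {
        return nullptr;  // Out of space; the caller must grow or collect.
      }
      // On failure the CAS reloads old_end with the current bump pointer.
    } while (!end.compare_exchange_weak(old_end, new_end,
                                        std::memory_order_relaxed));
    return old_end;  // The reserved object starts at the old end.
  }
};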
- uint64_t total = static_cast<uint64_t>(bytes_allocated_.LoadRelaxed()); + uint64_t total = static_cast<uint64_t>(bytes_allocated_.load(std::memory_order_relaxed)); Thread* self = Thread::Current(); MutexLock mu(self, *Locks::runtime_shutdown_lock_); MutexLock mu2(self, *Locks::thread_list_lock_); @@ -178,7 +178,7 @@ uint64_t BumpPointerSpace::GetBytesAllocated() { uint64_t BumpPointerSpace::GetObjectsAllocated() { // Start out pre-determined amount (blocks which are not being allocated into). - uint64_t total = static_cast<uint64_t>(objects_allocated_.LoadRelaxed()); + uint64_t total = static_cast<uint64_t>(objects_allocated_.load(std::memory_order_relaxed)); Thread* self = Thread::Current(); MutexLock mu(self, *Locks::runtime_shutdown_lock_); MutexLock mu2(self, *Locks::thread_list_lock_); @@ -195,8 +195,8 @@ uint64_t BumpPointerSpace::GetObjectsAllocated() { } void BumpPointerSpace::RevokeThreadLocalBuffersLocked(Thread* thread) { - objects_allocated_.FetchAndAddSequentiallyConsistent(thread->GetThreadLocalObjectsAllocated()); - bytes_allocated_.FetchAndAddSequentiallyConsistent(thread->GetThreadLocalBytesAllocated()); + objects_allocated_.fetch_add(thread->GetThreadLocalObjectsAllocated(), std::memory_order_seq_cst); + bytes_allocated_.fetch_add(thread->GetThreadLocalBytesAllocated(), std::memory_order_seq_cst); thread->SetTlab(nullptr, nullptr, nullptr); } diff --git a/runtime/gc/space/bump_pointer_space.h b/runtime/gc/space/bump_pointer_space.h index 7b43362c2d..5ba13ca3ff 100644 --- a/runtime/gc/space/bump_pointer_space.h +++ b/runtime/gc/space/bump_pointer_space.h @@ -155,8 +155,8 @@ class BumpPointerSpace FINAL : public ContinuousMemMapAllocSpace { // Record objects / bytes freed. void RecordFree(int32_t objects, int32_t bytes) { - objects_allocated_.FetchAndSubSequentiallyConsistent(objects); - bytes_allocated_.FetchAndSubSequentiallyConsistent(bytes); + objects_allocated_.fetch_sub(objects, std::memory_order_seq_cst); + bytes_allocated_.fetch_sub(bytes, std::memory_order_seq_cst); } void LogFragmentationAllocFailure(std::ostream& os, size_t failed_alloc_bytes) OVERRIDE diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index c100bc0c75..e2154b8e4d 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -672,7 +672,7 @@ class ImageSpaceLoader { // Loaded the map, use the image header from the file now in case we patch it with // RelocateInPlace. 
image_header = reinterpret_cast<ImageHeader*>(map->Begin()); - const uint32_t bitmap_index = ImageSpace::bitmap_index_.FetchAndAddSequentiallyConsistent(1); + const uint32_t bitmap_index = ImageSpace::bitmap_index_.fetch_add(1, std::memory_order_seq_cst); std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_filename, bitmap_index)); diff --git a/runtime/gc/space/region_space-inl.h b/runtime/gc/space/region_space-inl.h index 410931cbe5..7072a7e4cc 100644 --- a/runtime/gc/space/region_space-inl.h +++ b/runtime/gc/space/region_space-inl.h @@ -100,13 +100,13 @@ inline mirror::Object* RegionSpace::Region::Alloc(size_t num_bytes, uint8_t* old_top; uint8_t* new_top; do { - old_top = top_.LoadRelaxed(); + old_top = top_.load(std::memory_order_relaxed); new_top = old_top + num_bytes; if (UNLIKELY(new_top > end_)) { return nullptr; } } while (!top_.CompareAndSetWeakRelaxed(old_top, new_top)); - objects_allocated_.FetchAndAddRelaxed(1); + objects_allocated_.fetch_add(1, std::memory_order_relaxed); DCHECK_LE(Top(), end_); DCHECK_LT(old_top, end_); DCHECK_LE(new_top, end_); @@ -365,11 +365,11 @@ inline size_t RegionSpace::Region::BytesAllocated() const { inline size_t RegionSpace::Region::ObjectsAllocated() const { if (IsLarge()) { DCHECK_LT(begin_ + kRegionSize, Top()); - DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U); + DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U); return 1; } else if (IsLargeTail()) { DCHECK_EQ(begin_, Top()); - DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U); + DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U); return 0; } else { DCHECK(IsAllocated()) << "state=" << state_; diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc index 8d94c86701..5ea434a318 100644 --- a/runtime/gc/space/region_space.cc +++ b/runtime/gc/space/region_space.cc @@ -489,7 +489,7 @@ void RegionSpace::DumpNonFreeRegions(std::ostream& os) { void RegionSpace::RecordAlloc(mirror::Object* ref) { CHECK(ref != nullptr); Region* r = RefToRegion(ref); - r->objects_allocated_.FetchAndAddSequentiallyConsistent(1); + r->objects_allocated_.fetch_add(1, std::memory_order_seq_cst); } bool RegionSpace::AllocNewTlab(Thread* self, size_t min_bytes) { @@ -589,10 +589,10 @@ size_t RegionSpace::AllocationSizeNonvirtual(mirror::Object* obj, size_t* usable } void RegionSpace::Region::Clear(bool zero_and_release_pages) { - top_.StoreRelaxed(begin_); + top_.store(begin_, std::memory_order_relaxed); state_ = RegionState::kRegionStateFree; type_ = RegionType::kRegionTypeNone; - objects_allocated_.StoreRelaxed(0); + objects_allocated_.store(0, std::memory_order_relaxed); alloc_time_ = 0; live_bytes_ = static_cast<size_t>(-1); if (zero_and_release_pages) { diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h index d63257d928..6a1371af10 100644 --- a/runtime/gc/space/region_space.h +++ b/runtime/gc/space/region_space.h @@ -300,11 +300,11 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { void Init(size_t idx, uint8_t* begin, uint8_t* end) { idx_ = idx; begin_ = begin; - top_.StoreRelaxed(begin); + top_.store(begin, std::memory_order_relaxed); end_ = end; state_ = RegionState::kRegionStateFree; type_ = RegionType::kRegionTypeNone; - objects_allocated_.StoreRelaxed(0); + objects_allocated_.store(0, std::memory_order_relaxed); alloc_time_ = 0; live_bytes_ = static_cast<size_t>(-1); is_newly_allocated_ = false; @@ -334,7 +334,7 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { if (is_free) { 
DCHECK(IsInNoSpace()); DCHECK_EQ(begin_, Top()); - DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U); + DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U); } return is_free; } @@ -461,11 +461,11 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { } ALWAYS_INLINE uint8_t* Top() const { - return top_.LoadRelaxed(); + return top_.load(std::memory_order_relaxed); } void SetTop(uint8_t* new_top) { - top_.StoreRelaxed(new_top); + top_.store(new_top, std::memory_order_relaxed); } uint8_t* End() const { @@ -480,10 +480,10 @@ class RegionSpace FINAL : public ContinuousMemMapAllocSpace { void RecordThreadLocalAllocations(size_t num_objects, size_t num_bytes) { DCHECK(IsAllocated()); - DCHECK_EQ(objects_allocated_.LoadRelaxed(), 0U); + DCHECK_EQ(objects_allocated_.load(std::memory_order_relaxed), 0U); DCHECK_EQ(Top(), end_); - objects_allocated_.StoreRelaxed(num_objects); - top_.StoreRelaxed(begin_ + num_bytes); + objects_allocated_.store(num_objects, std::memory_order_relaxed); + top_.store(begin_ + num_bytes, std::memory_order_relaxed); DCHECK_LE(Top(), end_); } diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h index 7af19fae61..bc3ab48cf4 100644 --- a/runtime/gc/space/space.h +++ b/runtime/gc/space/space.h @@ -272,7 +272,7 @@ class ContinuousSpace : public Space { // Current address at which the space ends, which may vary as the space is filled. uint8_t* End() const { - return end_.LoadRelaxed(); + return end_.load(std::memory_order_relaxed); } // The end of the address range covered by the space. @@ -283,7 +283,7 @@ class ContinuousSpace : public Space { // Change the end of the space. Be careful with use since changing the end of a space to an // invalid value may break the GC. void SetEnd(uint8_t* end) { - end_.StoreRelaxed(end); + end_.store(end, std::memory_order_relaxed); } void SetLimit(uint8_t* limit) { diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc index cde155fb22..8c73ef9116 100644 --- a/runtime/gc/space/zygote_space.cc +++ b/runtime/gc/space/zygote_space.cc @@ -122,7 +122,7 @@ void ZygoteSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* ar // Need to mark the card since this will update the mod-union table next GC cycle. 
card_table->MarkCard(ptrs[i]); } - zygote_space->objects_allocated_.FetchAndSubSequentiallyConsistent(num_ptrs); + zygote_space->objects_allocated_.fetch_sub(num_ptrs, std::memory_order_seq_cst); } } // namespace space diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h index 08231017e7..10c1398001 100644 --- a/runtime/gc/space/zygote_space.h +++ b/runtime/gc/space/zygote_space.h @@ -67,7 +67,7 @@ class ZygoteSpace FINAL : public ContinuousMemMapAllocSpace { } uint64_t GetObjectsAllocated() { - return objects_allocated_.LoadSequentiallyConsistent(); + return objects_allocated_.load(std::memory_order_seq_cst); } void Clear() OVERRIDE; diff --git a/runtime/gc/task_processor_test.cc b/runtime/gc/task_processor_test.cc index 77b40e4593..38581ce807 100644 --- a/runtime/gc/task_processor_test.cc +++ b/runtime/gc/task_processor_test.cc @@ -37,7 +37,7 @@ class RecursiveTask : public HeapTask { if (max_recursion_ > 0) { task_processor_->AddTask(self, new RecursiveTask(task_processor_, counter_, max_recursion_ - 1)); - counter_->FetchAndAddSequentiallyConsistent(1U); + counter_->fetch_add(1U, std::memory_order_seq_cst); } } @@ -54,7 +54,7 @@ class WorkUntilDoneTask : public SelfDeletingTask { } virtual void Run(Thread* self) OVERRIDE { task_processor_->RunAllTasks(self); - done_running_->StoreSequentiallyConsistent(true); + done_running_->store(true, std::memory_order_seq_cst); } private: @@ -76,7 +76,7 @@ TEST_F(TaskProcessorTest, Interrupt) { thread_pool.StartWorkers(self); ASSERT_FALSE(done_running); // Wait until all the tasks are done, but since we didn't interrupt, done_running should be 0. - while (counter.LoadSequentiallyConsistent() != kRecursion) { + while (counter.load(std::memory_order_seq_cst) != kRecursion) { usleep(10); } ASSERT_FALSE(done_running); @@ -84,11 +84,11 @@ thread_pool.Wait(self, true, false); // After the interrupt and wait, the WorkUntilDoneTask should have terminated and // set done_running_ to true. - ASSERT_TRUE(done_running.LoadSequentiallyConsistent()); + ASSERT_TRUE(done_running.load(std::memory_order_seq_cst)); // Test that we finish remaining tasks before returning from RunTasksUntilInterrupted. - counter.StoreSequentiallyConsistent(0); - done_running.StoreSequentiallyConsistent(false); + counter.store(0, std::memory_order_seq_cst); + done_running.store(false, std::memory_order_seq_cst); // Self interrupt before any of the other tasks run, but since we added them we should keep on // working until all the tasks are completed. task_processor.Stop(self); @@ -96,8 +96,8 @@ thread_pool.AddTask(self, new WorkUntilDoneTask(&task_processor, &done_running)); thread_pool.StartWorkers(self); thread_pool.Wait(self, true, false); - ASSERT_TRUE(done_running.LoadSequentiallyConsistent()); - ASSERT_EQ(counter.LoadSequentiallyConsistent(), kRecursion); + ASSERT_TRUE(done_running.load(std::memory_order_seq_cst)); + ASSERT_EQ(counter.load(std::memory_order_seq_cst), kRecursion); } class TestOrderTask : public HeapTask { @@ -137,10 +137,10 @@ TEST_F(TaskProcessorTest, Ordering) { Atomic<bool> done_running(false); // Add a task which will wait until interrupted to the thread pool.
thread_pool.AddTask(self, new WorkUntilDoneTask(&task_processor, &done_running)); - ASSERT_FALSE(done_running.LoadSequentiallyConsistent()); + ASSERT_FALSE(done_running.load(std::memory_order_seq_cst)); thread_pool.StartWorkers(self); thread_pool.Wait(self, true, false); - ASSERT_TRUE(done_running.LoadSequentiallyConsistent()); + ASSERT_TRUE(done_running.load(std::memory_order_seq_cst)); ASSERT_EQ(counter, kNumTasks); } diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h index f2ea2fdaaa..5c6b4b56bc 100644 --- a/runtime/hidden_api.h +++ b/runtime/hidden_api.h @@ -27,6 +27,23 @@ namespace art { namespace hiddenapi { +// Hidden API enforcement policy +// This must be kept in sync with ApplicationInfo.ApiEnforcementPolicy in +// frameworks/base/core/java/android/content/pm/ApplicationInfo.java +enum class EnforcementPolicy { + kNoChecks = 0, + kAllLists = 1, // ban anything but whitelist + kDarkGreyAndBlackList = 2, // ban dark grey & blacklist + kBlacklistOnly = 3, // ban blacklist violations only + kMax = kBlacklistOnly, +}; + +inline EnforcementPolicy EnforcementPolicyFromInt(int api_policy_int) { + DCHECK_GE(api_policy_int, 0); + DCHECK_LE(api_policy_int, static_cast<int>(EnforcementPolicy::kMax)); + return static_cast<EnforcementPolicy>(api_policy_int); +} + enum Action { kAllow, kAllowButWarn, @@ -38,7 +55,6 @@ enum AccessMethod { kReflection, kJNI, kLinking, - kOverride, }; inline std::ostream& operator<<(std::ostream& os, AccessMethod value) { @@ -52,23 +68,42 @@ inline std::ostream& operator<<(std::ostream& os, AccessMethod value) { case kLinking: os << "linking"; break; - case kOverride: - os << "override"; - break; } return os; } +static constexpr bool EnumsEqual(EnforcementPolicy policy, HiddenApiAccessFlags::ApiList apiList) { + return static_cast<int>(policy) == static_cast<int>(apiList); +} + inline Action GetMemberAction(uint32_t access_flags) { - switch (HiddenApiAccessFlags::DecodeFromRuntime(access_flags)) { - case HiddenApiAccessFlags::kWhitelist: - return kAllow; - case HiddenApiAccessFlags::kLightGreylist: - return kAllowButWarn; - case HiddenApiAccessFlags::kDarkGreylist: - return kAllowButWarnAndToast; - case HiddenApiAccessFlags::kBlacklist: - return kDeny; + EnforcementPolicy policy = Runtime::Current()->GetHiddenApiEnforcementPolicy(); + if (policy == EnforcementPolicy::kNoChecks) { + // Exit early. Nothing to enforce. + return kAllow; + } + + HiddenApiAccessFlags::ApiList api_list = HiddenApiAccessFlags::DecodeFromRuntime(access_flags); + if (api_list == HiddenApiAccessFlags::kWhitelist) { + return kAllow; + } + // The logic below relies on equality of values in the enums EnforcementPolicy and + // HiddenApiAccessFlags::ApiList, and their ordering. Assert that this is as expected. + static_assert( + EnumsEqual(EnforcementPolicy::kAllLists, HiddenApiAccessFlags::kLightGreylist) && + EnumsEqual(EnforcementPolicy::kDarkGreyAndBlackList, HiddenApiAccessFlags::kDarkGreylist) && + EnumsEqual(EnforcementPolicy::kBlacklistOnly, HiddenApiAccessFlags::kBlacklist), + "Mismatch between EnforcementPolicy and ApiList enums"); + static_assert( + EnforcementPolicy::kAllLists < EnforcementPolicy::kDarkGreyAndBlackList && + EnforcementPolicy::kDarkGreyAndBlackList < EnforcementPolicy::kBlacklistOnly, + "EnforcementPolicy values ordering not correct"); + if (static_cast<int>(policy) > static_cast<int>(api_list)) { + return api_list == HiddenApiAccessFlags::kDarkGreylist + ? 
kAllowButWarnAndToast + : kAllowButWarn; + } else { + return kDeny; } } @@ -107,12 +142,6 @@ inline bool ShouldBlockAccessToMember(T* member, AccessMethod access_method) REQUIRES_SHARED(Locks::mutator_lock_) { DCHECK(member != nullptr); - Runtime* runtime = Runtime::Current(); - - if (!runtime->AreHiddenApiChecksEnabled()) { - // Exit early. Nothing to enforce. - return false; - } Action action = GetMemberAction(member->GetAccessFlags()); if (action == kAllow) { @@ -133,14 +162,16 @@ inline bool ShouldBlockAccessToMember(T* member, // We do this regardless of whether we block the access or not. WarnAboutMemberAccess(member, access_method); - // Block access if on blacklist. if (action == kDeny) { + // Block access return true; } // Allow access to this member but print a warning. DCHECK(action == kAllowButWarn || action == kAllowButWarnAndToast); + Runtime* runtime = Runtime::Current(); + // Depending on a runtime flag, we might move the member into whitelist and // skip the warning the next time the member is accessed. if (runtime->ShouldDedupeHiddenApiWarnings()) { @@ -150,7 +181,7 @@ inline bool ShouldBlockAccessToMember(T* member, // If this action requires a UI warning, set the appropriate flag. if (action == kAllowButWarnAndToast || runtime->ShouldAlwaysSetHiddenApiWarningFlag()) { - Runtime::Current()->SetPendingHiddenApiWarning(true); + runtime->SetPendingHiddenApiWarning(true); } return false; diff --git a/runtime/image.cc b/runtime/image.cc index 56fee9d510..5af3e5451b 100644 --- a/runtime/image.cc +++ b/runtime/image.cc @@ -26,7 +26,7 @@ namespace art { const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' }; -const uint8_t ImageHeader::kImageVersion[] = { '0', '5', '6', '\0' }; // No image tables in .bss. +const uint8_t ImageHeader::kImageVersion[] = { '0', '5', '7', '\0' }; // R^2 Bitstring type check. ImageHeader::ImageHeader(uint32_t image_begin, uint32_t image_size, diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc index da4c4b2fa4..8fe68bd318 100644 --- a/runtime/java_vm_ext.cc +++ b/runtime/java_vm_ext.cc @@ -736,14 +736,14 @@ void JavaVMExt::DisallowNewWeakGlobals() { // mutator lock exclusively held so that we don't have any threads in the middle of // DecodeWeakGlobal. Locks::mutator_lock_->AssertExclusiveHeld(self); - allow_accessing_weak_globals_.StoreSequentiallyConsistent(false); + allow_accessing_weak_globals_.store(false, std::memory_order_seq_cst); } void JavaVMExt::AllowNewWeakGlobals() { CHECK(!kUseReadBarrier); Thread* self = Thread::Current(); MutexLock mu(self, *Locks::jni_weak_globals_lock_); - allow_accessing_weak_globals_.StoreSequentiallyConsistent(true); + allow_accessing_weak_globals_.store(true, std::memory_order_seq_cst); weak_globals_add_condition_.Broadcast(self); } @@ -770,7 +770,7 @@ inline bool JavaVMExt::MayAccessWeakGlobalsUnlocked(Thread* self) const { DCHECK(self != nullptr); return kUseReadBarrier ? self->GetWeakRefAccessEnabled() : - allow_accessing_weak_globals_.LoadSequentiallyConsistent(); + allow_accessing_weak_globals_.load(std::memory_order_seq_cst); } ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) { @@ -809,7 +809,7 @@ ObjPtr<mirror::Object> JavaVMExt::DecodeWeakGlobalDuringShutdown(Thread* self, I } // self can be null during a runtime shutdown. ~Runtime()->~ClassLinker()->DecodeWeakGlobal(). 
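Back in hidden_api.h, the rewritten GetMemberAction leans entirely on the value-for-value correspondence between the two enums that the static_asserts pin down: once kNoChecks and the whitelist are filtered out, "policy > list" means the member's list is not banned under this policy. A compact sketch of the same decision table with both enums inlined (illustrative values mirroring the declarations above, not the runtime's actual headers):

enum class Policy { kNoChecks = 0, kAllLists = 1, kDarkGreyAndBlackList = 2, kBlacklistOnly = 3 };
enum class ApiList { kWhitelist = 0, kLightGreylist = 1, kDarkGreylist = 2, kBlacklist = 3 };
enum Action { kAllow, kAllowButWarn, kAllowButWarnAndToast, kDeny };

Action MemberAction(Policy policy, ApiList list) {
  if (policy == Policy::kNoChecks || list == ApiList::kWhitelist) {
    return kAllow;
  }
  // Shared values make the comparison meaningful: a policy strictly greater
  // than the member's list only bans later lists, so this one merely warns.
  if (static_cast<int>(policy) > static_cast<int>(list)) {
    return list == ApiList::kDarkGreylist ? kAllowButWarnAndToast : kAllowButWarn;
  }
  return kDeny;  // The member's list is banned under this policy.
}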
if (!kUseReadBarrier) { - DCHECK(allow_accessing_weak_globals_.LoadSequentiallyConsistent()); + DCHECK(allow_accessing_weak_globals_.load(std::memory_order_seq_cst)); } return weak_globals_.SynchronizedGet(ref); } diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc index 291a983e75..1e61ba0f2d 100644 --- a/runtime/jdwp/jdwp_handler.cc +++ b/runtime/jdwp/jdwp_handler.cc @@ -1625,7 +1625,7 @@ size_t JdwpState::ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip * so waitForDebugger() doesn't return if we stall for a bit here. */ Dbg::GoActive(); - last_activity_time_ms_.StoreSequentiallyConsistent(0); + last_activity_time_ms_.store(0, std::memory_order_seq_cst); } /* @@ -1703,7 +1703,7 @@ size_t JdwpState::ProcessRequest(Request* request, ExpandBuf* pReply, bool* skip * the initial setup. Only update if this is a non-DDMS packet. */ if (request->GetCommandSet() != kJDWPDdmCmdSet) { - last_activity_time_ms_.StoreSequentiallyConsistent(MilliTime()); + last_activity_time_ms_.store(MilliTime(), std::memory_order_seq_cst); } return replyLength; diff --git a/runtime/jdwp/jdwp_main.cc b/runtime/jdwp/jdwp_main.cc index 557b032154..447e3bf45b 100644 --- a/runtime/jdwp/jdwp_main.cc +++ b/runtime/jdwp/jdwp_main.cc @@ -729,7 +729,7 @@ int64_t JdwpState::LastDebuggerActivity() { return -1; } - int64_t last = last_activity_time_ms_.LoadSequentiallyConsistent(); + int64_t last = last_activity_time_ms_.load(std::memory_order_seq_cst); /* initializing or in the middle of something? */ if (last == 0) { diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc index 23cf071d56..813430f0bb 100644 --- a/runtime/jit/jit.cc +++ b/runtime/jit/jit.cc @@ -718,10 +718,11 @@ void Jit::MethodEntered(Thread* thread, ArtMethod* method) { Runtime* runtime = Runtime::Current(); if (UNLIKELY(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse())) { ArtMethod* np_method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize); - DCHECK(!np_method->IsNative()); if (np_method->IsCompilable()) { - // The compiler requires a ProfilingInfo object. - ProfilingInfo::Create(thread, np_method, /* retry_allocation */ true); + if (!np_method->IsNative()) { + // The compiler requires a ProfilingInfo object for non-native methods. + ProfilingInfo::Create(thread, np_method, /* retry_allocation */ true); + } JitCompileTask compile_task(method, JitCompileTask::kCompile); compile_task.Run(thread); } diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc index b2d58da80e..1c4b93eb48 100644 --- a/runtime/jit/jit_code_cache.cc +++ b/runtime/jit/jit_code_cache.cc @@ -623,7 +623,7 @@ void JitCodeCache::RemoveMethodsIn(Thread* self, const LinearAlloc& alloc) { bool JitCodeCache::IsWeakAccessEnabled(Thread* self) const { return kUseReadBarrier ? 
self->GetWeakRefAccessEnabled() - : is_weak_access_enabled_.LoadSequentiallyConsistent(); + : is_weak_access_enabled_.load(std::memory_order_seq_cst); } void JitCodeCache::WaitUntilInlineCacheAccessible(Thread* self) { @@ -645,13 +645,13 @@ void JitCodeCache::BroadcastForInlineCacheAccess() { void JitCodeCache::AllowInlineCacheAccess() { DCHECK(!kUseReadBarrier); - is_weak_access_enabled_.StoreSequentiallyConsistent(true); + is_weak_access_enabled_.store(true, std::memory_order_seq_cst); BroadcastForInlineCacheAccess(); } void JitCodeCache::DisallowInlineCacheAccess() { DCHECK(!kUseReadBarrier); - is_weak_access_enabled_.StoreSequentiallyConsistent(false); + is_weak_access_enabled_.store(false, std::memory_order_seq_cst); } void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic, @@ -820,7 +820,7 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self, // code. GetLiveBitmap()->AtomicTestAndSet(FromCodeToAllocation(code_ptr)); } - last_update_time_ns_.StoreRelease(NanoTime()); + last_update_time_ns_.store(NanoTime(), std::memory_order_release); VLOG(jit) << "JIT added (osr=" << std::boolalpha << osr << std::noboolalpha << ") " << ArtMethod::PrettyMethod(method) << "@" << method @@ -1647,7 +1647,7 @@ void JitCodeCache::GetProfiledMethods(const std::set<std::string>& dex_base_loca } uint64_t JitCodeCache::GetLastUpdateTimeNs() const { - return last_update_time_ns_.LoadAcquire(); + return last_update_time_ns_.load(std::memory_order_acquire); } bool JitCodeCache::IsOsrCompiled(ArtMethod* method) { diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h index 3ffedcac4b..7a4876c412 100644 --- a/runtime/mirror/dex_cache-inl.h +++ b/runtime/mirror/dex_cache-inl.h @@ -154,7 +154,7 @@ inline CallSite* DexCache::GetResolvedCallSite(uint32_t call_site_idx) { GcRoot<mirror::CallSite>& target = GetResolvedCallSites()[call_site_idx]; Atomic<GcRoot<mirror::CallSite>>& ref = reinterpret_cast<Atomic<GcRoot<mirror::CallSite>>&>(target); - return ref.LoadSequentiallyConsistent().Read(); + return ref.load(std::memory_order_seq_cst).Read(); } inline CallSite* DexCache::SetResolvedCallSite(uint32_t call_site_idx, CallSite* call_site) { diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h index 55dd51427c..c7561f4278 100644 --- a/runtime/mirror/object-inl.h +++ b/runtime/mirror/object-inl.h @@ -673,7 +673,7 @@ template<typename kSize> inline kSize Object::GetFieldAcquire(MemberOffset field_offset) { const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value(); const kSize* addr = reinterpret_cast<const kSize*>(raw_addr); - return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadAcquire(); + return reinterpret_cast<const Atomic<kSize>*>(addr)->load(std::memory_order_acquire); } template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags> @@ -956,7 +956,7 @@ inline ObjPtr<Object> Object::CompareAndExchangeFieldObject(MemberOffset field_o uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value)); uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value(); Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr); - bool success = atomic_addr->CompareAndExchangeStrongSequentiallyConsistent(&old_ref, new_ref); + bool success = atomic_addr->compare_exchange_strong(old_ref, new_ref, std::memory_order_seq_cst); ObjPtr<Object> witness_value(PtrCompression<kPoisonHeapReferences, Object>::Decompress(old_ref)); if (kIsDebugBuild) { // Ensure caller 
has done read barrier on the reference field so it's in the to-space. @@ -986,7 +986,7 @@ inline ObjPtr<Object> Object::ExchangeFieldObject(MemberOffset field_offset, uint32_t new_ref(PtrCompression<kPoisonHeapReferences, Object>::Compress(new_value)); uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value(); Atomic<uint32_t>* atomic_addr = reinterpret_cast<Atomic<uint32_t>*>(raw_addr); - uint32_t old_ref = atomic_addr->ExchangeSequentiallyConsistent(new_ref); + uint32_t old_ref = atomic_addr->exchange(new_ref, std::memory_order_seq_cst); ObjPtr<Object> old_value(PtrCompression<kPoisonHeapReferences, Object>::Decompress(old_ref)); if (kIsDebugBuild) { // Ensure caller has done read barrier on the reference field so it's in the to-space. diff --git a/runtime/mirror/object.cc b/runtime/mirror/object.cc index f274cfc2fa..0e03e3741c 100644 --- a/runtime/mirror/object.cc +++ b/runtime/mirror/object.cc @@ -87,16 +87,18 @@ Object* Object::CopyObject(ObjPtr<mirror::Object> dest, DCHECK_ALIGNED(dst_bytes, sizeof(uintptr_t)); // Use word sized copies to begin. while (num_bytes >= sizeof(uintptr_t)) { - reinterpret_cast<Atomic<uintptr_t>*>(dst_bytes)->StoreRelaxed( - reinterpret_cast<Atomic<uintptr_t>*>(src_bytes)->LoadRelaxed()); + reinterpret_cast<Atomic<uintptr_t>*>(dst_bytes)->store( + reinterpret_cast<Atomic<uintptr_t>*>(src_bytes)->load(std::memory_order_relaxed), + std::memory_order_relaxed); src_bytes += sizeof(uintptr_t); dst_bytes += sizeof(uintptr_t); num_bytes -= sizeof(uintptr_t); } // Copy possible 32 bit word. if (sizeof(uintptr_t) != sizeof(uint32_t) && num_bytes >= sizeof(uint32_t)) { - reinterpret_cast<Atomic<uint32_t>*>(dst_bytes)->StoreRelaxed( - reinterpret_cast<Atomic<uint32_t>*>(src_bytes)->LoadRelaxed()); + reinterpret_cast<Atomic<uint32_t>*>(dst_bytes)->store( + reinterpret_cast<Atomic<uint32_t>*>(src_bytes)->load(std::memory_order_relaxed), + std::memory_order_relaxed); src_bytes += sizeof(uint32_t); dst_bytes += sizeof(uint32_t); num_bytes -= sizeof(uint32_t); @@ -104,8 +106,9 @@ Object* Object::CopyObject(ObjPtr<mirror::Object> dest, // Copy remaining bytes, avoid going past the end of num_bytes since there may be a redzone // there. 
while (num_bytes > 0) { - reinterpret_cast<Atomic<uint8_t>*>(dst_bytes)->StoreRelaxed( - reinterpret_cast<Atomic<uint8_t>*>(src_bytes)->LoadRelaxed()); + reinterpret_cast<Atomic<uint8_t>*>(dst_bytes)->store( + reinterpret_cast<Atomic<uint8_t>*>(src_bytes)->load(std::memory_order_relaxed), + std::memory_order_relaxed); src_bytes += sizeof(uint8_t); dst_bytes += sizeof(uint8_t); num_bytes -= sizeof(uint8_t); @@ -173,7 +176,7 @@ Object* Object::Clone(Thread* self) { uint32_t Object::GenerateIdentityHashCode() { uint32_t expected_value, new_value; do { - expected_value = hash_code_seed.LoadRelaxed(); + expected_value = hash_code_seed.load(std::memory_order_relaxed); new_value = expected_value * 1103515245 + 12345; } while (!hash_code_seed.CompareAndSetWeakRelaxed(expected_value, new_value) || (expected_value & LockWord::kHashMask) == 0); @@ -181,7 +184,7 @@ uint32_t Object::GenerateIdentityHashCode() { } void Object::SetHashCodeSeed(uint32_t new_seed) { - hash_code_seed.StoreRelaxed(new_seed); + hash_code_seed.store(new_seed, std::memory_order_relaxed); } int32_t Object::IdentityHashCode() { diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h index 95f82cb147..d00c90bcc0 100644 --- a/runtime/mirror/object.h +++ b/runtime/mirror/object.h @@ -730,7 +730,7 @@ class MANAGED LOCKABLE Object { uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value(); kSize* addr = reinterpret_cast<kSize*>(raw_addr); if (kIsVolatile) { - reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value); + reinterpret_cast<Atomic<kSize>*>(addr)->store(new_value, std::memory_order_seq_cst); } else { reinterpret_cast<Atomic<kSize>*>(addr)->StoreJavaData(new_value); } @@ -742,7 +742,7 @@ class MANAGED LOCKABLE Object { const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value(); const kSize* addr = reinterpret_cast<const kSize*>(raw_addr); if (kIsVolatile) { - return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent(); + return reinterpret_cast<const Atomic<kSize>*>(addr)->load(std::memory_order_seq_cst); } else { return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadJavaData(); } diff --git a/runtime/mirror/object_reference.h b/runtime/mirror/object_reference.h index cf1f85d236..356fef0d26 100644 --- a/runtime/mirror/object_reference.h +++ b/runtime/mirror/object_reference.h @@ -110,13 +110,13 @@ class MANAGED HeapReference { template <bool kIsVolatile = false> MirrorType* AsMirrorPtr() const REQUIRES_SHARED(Locks::mutator_lock_) { return Compression::Decompress( - kIsVolatile ? reference_.LoadSequentiallyConsistent() : reference_.LoadJavaData()); + kIsVolatile ? 
reference_.load(std::memory_order_seq_cst) : reference_.LoadJavaData()); } template <bool kIsVolatile = false> void Assign(MirrorType* other) REQUIRES_SHARED(Locks::mutator_lock_) { if (kIsVolatile) { - reference_.StoreSequentiallyConsistent(Compression::Compress(other)); + reference_.store(Compression::Compress(other), std::memory_order_seq_cst); } else { reference_.StoreJavaData(Compression::Compress(other)); } diff --git a/runtime/monitor.cc b/runtime/monitor.cc index 2a938da15b..e110763300 100644 --- a/runtime/monitor.cc +++ b/runtime/monitor.cc @@ -140,7 +140,7 @@ int32_t Monitor::GetHashCode() { } } DCHECK(HasHashCode()); - return hash_code_.LoadRelaxed(); + return hash_code_.load(std::memory_order_relaxed); } bool Monitor::Install(Thread* self) { @@ -155,7 +155,7 @@ bool Monitor::Install(Thread* self) { break; } case LockWord::kHashCode: { - CHECK_EQ(hash_code_.LoadRelaxed(), static_cast<int32_t>(lw.GetHashCode())); + CHECK_EQ(hash_code_.load(std::memory_order_relaxed), static_cast<int32_t>(lw.GetHashCode())); break; } case LockWord::kFatLocked: { diff --git a/runtime/monitor.h b/runtime/monitor.h index 384ebbedaa..6b7604ec8a 100644 --- a/runtime/monitor.h +++ b/runtime/monitor.h @@ -130,7 +130,7 @@ class Monitor { bool IsLocked() REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!monitor_lock_); bool HasHashCode() const { - return hash_code_.LoadRelaxed() != 0; + return hash_code_.load(std::memory_order_relaxed) != 0; } MonitorId GetMonitorId() const { diff --git a/runtime/native/dalvik_system_VMDebug.cc b/runtime/native/dalvik_system_VMDebug.cc index fc9426650e..3692a308d8 100644 --- a/runtime/native/dalvik_system_VMDebug.cc +++ b/runtime/native/dalvik_system_VMDebug.cc @@ -89,17 +89,27 @@ static void VMDebug_resetAllocCount(JNIEnv*, jclass, jint kinds) { static void VMDebug_startMethodTracingDdmsImpl(JNIEnv*, jclass, jint bufferSize, jint flags, jboolean samplingEnabled, jint intervalUs) { - Trace::Start("[DDMS]", -1, bufferSize, flags, Trace::TraceOutputMode::kDDMS, - samplingEnabled ? Trace::TraceMode::kSampling : Trace::TraceMode::kMethodTracing, - intervalUs); -} - -static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceFilename, - jint javaFd, jint bufferSize, jint flags, - jboolean samplingEnabled, jint intervalUs, + Trace::StartDDMS(bufferSize, + flags, + samplingEnabled ? Trace::TraceMode::kSampling : Trace::TraceMode::kMethodTracing, + intervalUs); +} + +static void VMDebug_startMethodTracingFd(JNIEnv* env, + jclass, + jstring javaTraceFilename ATTRIBUTE_UNUSED, + jint javaFd, + jint bufferSize, + jint flags, + jboolean samplingEnabled, + jint intervalUs, jboolean streamingOutput) { int originalFd = javaFd; if (originalFd < 0) { + ScopedObjectAccess soa(env); + soa.Self()->ThrowNewExceptionF("Ljava/lang/RuntimeException;", + "Trace fd is invalid: %d", + originalFd); return; } @@ -107,18 +117,20 @@ static void VMDebug_startMethodTracingFd(JNIEnv* env, jclass, jstring javaTraceF if (fd < 0) { ScopedObjectAccess soa(env); soa.Self()->ThrowNewExceptionF("Ljava/lang/RuntimeException;", - "dup(%d) failed: %s", originalFd, strerror(errno)); + "dup(%d) failed: %s", + originalFd, + strerror(errno)); return; } - ScopedUtfChars traceFilename(env, javaTraceFilename); - if (traceFilename.c_str() == nullptr) { - return; - } + // Ignore the traceFilename. Trace::TraceOutputMode outputMode = streamingOutput ? 
Trace::TraceOutputMode::kStreaming : Trace::TraceOutputMode::kFile; - Trace::Start(traceFilename.c_str(), fd, bufferSize, flags, outputMode, + Trace::Start(fd, + bufferSize, + flags, + outputMode, samplingEnabled ? Trace::TraceMode::kSampling : Trace::TraceMode::kMethodTracing, intervalUs); } @@ -130,7 +142,10 @@ static void VMDebug_startMethodTracingFilename(JNIEnv* env, jclass, jstring java if (traceFilename.c_str() == nullptr) { return; } - Trace::Start(traceFilename.c_str(), -1, bufferSize, flags, Trace::TraceOutputMode::kFile, + Trace::Start(traceFilename.c_str(), + bufferSize, + flags, + Trace::TraceOutputMode::kFile, samplingEnabled ? Trace::TraceMode::kSampling : Trace::TraceMode::kMethodTracing, intervalUs); } diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc index 89135698e3..d9a5096331 100644 --- a/runtime/native/dalvik_system_ZygoteHooks.cc +++ b/runtime/native/dalvik_system_ZygoteHooks.cc @@ -162,19 +162,24 @@ static void CollectNonDebuggableClasses() REQUIRES(!Locks::mutator_lock_) { // Must match values in com.android.internal.os.Zygote. enum { - DEBUG_ENABLE_JDWP = 1, - DEBUG_ENABLE_CHECKJNI = 1 << 1, - DEBUG_ENABLE_ASSERT = 1 << 2, - DEBUG_ENABLE_SAFEMODE = 1 << 3, - DEBUG_ENABLE_JNI_LOGGING = 1 << 4, - DEBUG_GENERATE_DEBUG_INFO = 1 << 5, - DEBUG_ALWAYS_JIT = 1 << 6, - DEBUG_NATIVE_DEBUGGABLE = 1 << 7, - DEBUG_JAVA_DEBUGGABLE = 1 << 8, - DISABLE_VERIFIER = 1 << 9, - ONLY_USE_SYSTEM_OAT_FILES = 1 << 10, - ENABLE_HIDDEN_API_CHECKS = 1 << 11, - DEBUG_GENERATE_MINI_DEBUG_INFO = 1 << 12, + DEBUG_ENABLE_JDWP = 1, + DEBUG_ENABLE_CHECKJNI = 1 << 1, + DEBUG_ENABLE_ASSERT = 1 << 2, + DEBUG_ENABLE_SAFEMODE = 1 << 3, + DEBUG_ENABLE_JNI_LOGGING = 1 << 4, + DEBUG_GENERATE_DEBUG_INFO = 1 << 5, + DEBUG_ALWAYS_JIT = 1 << 6, + DEBUG_NATIVE_DEBUGGABLE = 1 << 7, + DEBUG_JAVA_DEBUGGABLE = 1 << 8, + DISABLE_VERIFIER = 1 << 9, + ONLY_USE_SYSTEM_OAT_FILES = 1 << 10, + DEBUG_GENERATE_MINI_DEBUG_INFO = 1 << 11, + HIDDEN_API_ENFORCEMENT_POLICY_MASK = (1 << 12) + | (1 << 13), + + // bits to shift (flags & HIDDEN_API_ENFORCEMENT_POLICY_MASK) by to get a value + // corresponding to hiddenapi::EnforcementPolicy + API_ENFORCEMENT_POLICY_SHIFT = CTZ(HIDDEN_API_ENFORCEMENT_POLICY_MASK), }; static uint32_t EnableDebugFeatures(uint32_t runtime_flags) { @@ -285,7 +290,8 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env, // Our system thread ID, etc, has changed so reset Thread state. 
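The new HIDDEN_API_ENFORCEMENT_POLICY_MASK above packs the enforcement policy into two adjacent flag bits, and API_ENFORCEMENT_POLICY_SHIFT is derived from the mask itself via count-trailing-zeros, so the shift stays correct if the field ever moves. A minimal sketch of the decode (using the GCC/Clang builtin in place of ART's CTZ helper):

#include <cstdint>

constexpr uint32_t kPolicyMask = (1u << 12) | (1u << 13);
constexpr int kPolicyShift = __builtin_ctz(kPolicyMask);  // == 12

// Extract the two-bit policy field as an integer 0..3, the input expected
// by a converter like EnforcementPolicyFromInt.
int DecodePolicy(uint32_t runtime_flags) {
  return static_cast<int>((runtime_flags & kPolicyMask) >> kPolicyShift);
}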
thread->InitAfterFork(); runtime_flags = EnableDebugFeatures(runtime_flags); - bool do_hidden_api_checks = false; + hiddenapi::EnforcementPolicy api_enforcement_policy = hiddenapi::EnforcementPolicy::kNoChecks; + bool dedupe_hidden_api_warnings = true; if ((runtime_flags & DISABLE_VERIFIER) != 0) { Runtime::Current()->DisableVerifier(); @@ -297,10 +303,9 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env, runtime_flags &= ~ONLY_USE_SYSTEM_OAT_FILES; } - if ((runtime_flags & ENABLE_HIDDEN_API_CHECKS) != 0) { - do_hidden_api_checks = true; - runtime_flags &= ~ENABLE_HIDDEN_API_CHECKS; - } + api_enforcement_policy = hiddenapi::EnforcementPolicyFromInt( + (runtime_flags & HIDDEN_API_ENFORCEMENT_POLICY_MASK) >> API_ENFORCEMENT_POLICY_SHIFT); + runtime_flags &= ~HIDDEN_API_ENFORCEMENT_POLICY_MASK; if (runtime_flags != 0) { LOG(ERROR) << StringPrintf("Unknown bits set in runtime_flags: %#x", runtime_flags); @@ -338,7 +343,6 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env, std::string trace_file = StringPrintf("/data/misc/trace/%s.trace.bin", proc_name.c_str()); Trace::Start(trace_file.c_str(), - -1, buffer_size, 0, // TODO: Expose flags. output_mode, @@ -351,11 +355,13 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env, } } + bool do_hidden_api_checks = api_enforcement_policy != hiddenapi::EnforcementPolicy::kNoChecks; DCHECK(!(is_system_server && do_hidden_api_checks)) - << "SystemServer should be forked with ENABLE_HIDDEN_API_CHECKS"; + << "SystemServer should be forked with EnforcementPolicy::kNoChecks"; DCHECK(!(is_zygote && do_hidden_api_checks)) - << "Child zygote processes should be forked with ENABLE_HIDDEN_API_CHECKS"; + << "Child zygote processes should be forked with EnforcementPolicy::kNoChecks"; - Runtime::Current()->SetHiddenApiChecksEnabled(do_hidden_api_checks); + Runtime::Current()->SetHiddenApiEnforcementPolicy(api_enforcement_policy); + Runtime::Current()->SetDedupeHiddenApiWarnings(dedupe_hidden_api_warnings); // Clear the hidden API warning flag, in case it was set. Runtime::Current()->SetPendingHiddenApiWarning(false); diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc index 25d50376de..fc61c9597e 100644 --- a/runtime/native/java_lang_Class.cc +++ b/runtime/native/java_lang_Class.cc @@ -89,8 +89,8 @@ static bool IsCallerInBootClassPath(Thread* self) REQUIRES_SHARED(Locks::mutator // access hidden APIs. This can be *very* expensive. Never call this in a loop. ALWAYS_INLINE static bool ShouldEnforceHiddenApi(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_) { - return Runtime::Current()->AreHiddenApiChecksEnabled() && - !IsCallerInBootClassPath(self); + hiddenapi::EnforcementPolicy policy = Runtime::Current()->GetHiddenApiEnforcementPolicy(); + return policy != hiddenapi::EnforcementPolicy::kNoChecks && !IsCallerInBootClassPath(self); } // Returns true if the first non-ClassClass caller up the stack should not be diff --git a/runtime/read_barrier-inl.h b/runtime/read_barrier-inl.h index 58f6c04c3e..5035ba077c 100644 --- a/runtime/read_barrier-inl.h +++ b/runtime/read_barrier-inl.h @@ -130,7 +130,7 @@ inline MirrorType* ReadBarrier::BarrierForRoot(MirrorType** root, ref = reinterpret_cast<MirrorType*>(Mark(old_ref)); // Update the field atomically. This may fail if a mutator updates before us, but it's ok.
if (ref != old_ref) { - Atomic<mirror::Object*>* atomic_root = reinterpret_cast<Atomic<mirror::Object*>*>(root); + Atomic<MirrorType*>* atomic_root = reinterpret_cast<Atomic<MirrorType*>*>(root); atomic_root->CompareAndSetStrongRelaxed(old_ref, ref); } } diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 7d9d3426fc..9a626bab00 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -267,7 +267,7 @@ Runtime::Runtime() oat_file_manager_(nullptr), is_low_memory_mode_(false), safe_mode_(false), - do_hidden_api_checks_(false), + hidden_api_policy_(hiddenapi::EnforcementPolicy::kNoChecks), pending_hidden_api_warning_(false), dedupe_hidden_api_warnings_(true), always_set_hidden_api_warning_flag_(false), @@ -839,7 +839,6 @@ bool Runtime::Start() { if (trace_config_.get() != nullptr && trace_config_->trace_file != "") { ScopedThreadStateChange tsc(self, kWaitingForMethodTracingStart); Trace::Start(trace_config_->trace_file.c_str(), - -1, static_cast<int>(trace_config_->trace_file_size), 0, trace_config_->trace_output_mode, @@ -1196,9 +1195,14 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { // by default and we only enable them if: // (a) runtime was started with a flag that enables the checks, or // (b) Zygote forked a new process that is not exempt (see ZygoteHooks). - do_hidden_api_checks_ = runtime_options.Exists(Opt::HiddenApiChecks); - DCHECK(!is_zygote_ || !do_hidden_api_checks_) - << "Zygote should not be started with hidden API checks"; + bool do_hidden_api_checks = runtime_options.Exists(Opt::HiddenApiChecks); + DCHECK(!is_zygote_ || !do_hidden_api_checks); + // TODO pass the actual enforcement policy in, rather than just a single bit. + // As is, we're encoding some logic here about which specific policy to use, which would be better + // controlled by the framework. + hidden_api_policy_ = do_hidden_api_checks + ? hiddenapi::EnforcementPolicy::kBlacklistOnly + : hiddenapi::EnforcementPolicy::kNoChecks; no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain); force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge); diff --git a/runtime/runtime.h b/runtime/runtime.h index c7f650ea3f..dba31b2939 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -49,6 +49,10 @@ class AbstractSystemWeakHolder; class Heap; } // namespace gc +namespace hiddenapi { +enum class EnforcementPolicy; +} // namespace hiddenapi + namespace jit { class Jit; class JitOptions; @@ -520,12 +524,12 @@ class Runtime { bool IsVerificationEnabled() const; bool IsVerificationSoftFail() const; - void SetHiddenApiChecksEnabled(bool value) { - do_hidden_api_checks_ = value; + void SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy policy) { + hidden_api_policy_ = policy; } - bool AreHiddenApiChecksEnabled() const { - return do_hidden_api_checks_; + hiddenapi::EnforcementPolicy GetHiddenApiEnforcementPolicy() const { + return hidden_api_policy_; } void SetPendingHiddenApiWarning(bool value) { @@ -990,7 +994,7 @@ class Runtime { bool safe_mode_; // Whether access checks on hidden API should be performed. - bool do_hidden_api_checks_; + hiddenapi::EnforcementPolicy hidden_api_policy_; // Whether the application has used an API which is not restricted but we // should issue a warning about it. 
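The read_barrier-inl.h hunk above also shows the root-update idiom: publish the to-space reference with a relaxed strong CAS and deliberately ignore failure, because losing the race just means another mutator already stored an equally valid pointer. Standalone, as a sketch over plain std::atomic (type names hypothetical):

#include <atomic>

// Publish `to_space_ref` into a GC root slot, tolerating a lost race.
template <typename T>
void UpdateRoot(std::atomic<T*>* root, T* old_ref, T* to_space_ref) {
  if (to_space_ref != old_ref) {
    T* expected = old_ref;
    // Failure is benign: a concurrent mutator installed its own marked value.
    root->compare_exchange_strong(expected, to_space_ref,
                                  std::memory_order_relaxed);
  }
}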
diff --git a/runtime/thread-inl.h b/runtime/thread-inl.h index 2f6f50e31e..e34f32e0bf 100644 --- a/runtime/thread-inl.h +++ b/runtime/thread-inl.h @@ -251,6 +251,7 @@ inline ThreadState Thread::TransitionFromSuspendedToRunnable() { union StateAndFlags new_state_and_flags; new_state_and_flags.as_int = old_state_and_flags.as_int; new_state_and_flags.as_struct.state = kRunnable; + // CAS the value with a memory barrier. if (LIKELY(tls32_.state_and_flags.as_atomic_int.CompareAndSetWeakAcquire( old_state_and_flags.as_int, diff --git a/runtime/thread.cc b/runtime/thread.cc index 5b03c2d884..b13d8ec42a 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -1280,7 +1280,7 @@ bool Thread::ModifySuspendCountInternal(Thread* self, AtomicClearFlag(kSuspendRequest); } else { // Two bits might be set simultaneously. - tls32_.state_and_flags.as_atomic_int.FetchAndBitwiseOrSequentiallyConsistent(flags); + tls32_.state_and_flags.as_atomic_int.fetch_or(flags, std::memory_order_seq_cst); TriggerSuspend(); } return true; @@ -1318,7 +1318,7 @@ bool Thread::PassActiveSuspendBarriers(Thread* self) { if (pending_threads != nullptr) { bool done = false; do { - int32_t cur_val = pending_threads->LoadRelaxed(); + int32_t cur_val = pending_threads->load(std::memory_order_relaxed); CHECK_GT(cur_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << cur_val; // Reduce value by 1. done = pending_threads->CompareAndSetWeakRelaxed(cur_val, cur_val - 1); @@ -1438,8 +1438,12 @@ class BarrierClosure : public Closure { barrier_.Pass(self); } - void Wait(Thread* self) { - barrier_.Increment(self, 1); + void Wait(Thread* self, ThreadState suspend_state) { + if (suspend_state != ThreadState::kRunnable) { + barrier_.Increment<Barrier::kDisallowHoldingLocks>(self, 1); + } else { + barrier_.Increment<Barrier::kAllowHoldingLocks>(self, 1); + } } private: @@ -1448,7 +1452,7 @@ class BarrierClosure : public Closure { }; // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. -bool Thread::RequestSynchronousCheckpoint(Closure* function) { +bool Thread::RequestSynchronousCheckpoint(Closure* function, ThreadState suspend_state) { Thread* self = Thread::Current(); if (this == Thread::Current()) { Locks::thread_list_lock_->AssertExclusiveHeld(self); @@ -1496,8 +1500,8 @@ bool Thread::RequestSynchronousCheckpoint(Closure* function) { // Relinquish the thread-list lock. We should not wait holding any locks. We cannot // reacquire it since we don't know if 'this' hasn't been deleted yet. Locks::thread_list_lock_->ExclusiveUnlock(self); - ScopedThreadSuspension sts(self, ThreadState::kWaiting); - barrier_closure.Wait(self); + ScopedThreadStateChange sts(self, suspend_state); + barrier_closure.Wait(self, suspend_state); return true; } // Fall-through. @@ -1521,7 +1525,7 @@ bool Thread::RequestSynchronousCheckpoint(Closure* function) { // that we can call ModifySuspendCount without racing against ThreadList::Unregister. ScopedThreadListLockUnlock stllu(self); { - ScopedThreadSuspension sts(self, ThreadState::kWaiting); + ScopedThreadStateChange sts(self, suspend_state); while (GetState() == ThreadState::kRunnable) { // We became runnable again. Wait till the suspend triggered in ModifySuspendCount // moves us to suspended. 
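The GetFlipFunction hunk below keeps its weak-CAS loop because the flip closure must be claimed exactly once: each reader tries to swap the slot to null, and only the winner runs the closure. The idiom in isolation (a sketch with a hypothetical Closure type):

#include <atomic>

struct Closure {};

// Atomically take ownership of a pending closure, or return null if there
// is none (or a racing thread claimed it first).
Closure* ClaimPending(std::atomic<Closure*>& slot) {
  Closure* func = slot.load(std::memory_order_relaxed);
  while (func != nullptr) {
    // On failure compare_exchange_weak re-reads the slot into `func`; if a
    // racer emptied it, the loop exits and we return null.
    if (slot.compare_exchange_weak(func, nullptr)) {
      return func;  // We own the closure now.
    }
  }
  return nullptr;
}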
@@ -1558,7 +1562,7 @@ Closure* Thread::GetFlipFunction() { Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function); Closure* func; do { - func = atomic_func->LoadRelaxed(); + func = atomic_func->load(std::memory_order_relaxed); if (func == nullptr) { return nullptr; } @@ -1570,7 +1574,7 @@ void Thread::SetFlipFunction(Closure* function) { CHECK(function != nullptr); Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function); - atomic_func->StoreSequentiallyConsistent(function); + atomic_func->store(function, std::memory_order_seq_cst); } void Thread::FullSuspendCheck() { @@ -2102,7 +2106,7 @@ Thread::Thread(bool daemon) "art::Thread has a size which is not a multiple of 4."); tls32_.state_and_flags.as_struct.flags = 0; tls32_.state_and_flags.as_struct.state = kNative; - tls32_.interrupted.StoreRelaxed(false); + tls32_.interrupted.store(false, std::memory_order_relaxed); memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes)); std::fill(tlsPtr_.rosalloc_runs, tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread, @@ -2397,24 +2401,24 @@ bool Thread::IsJWeakCleared(jweak obj) const { bool Thread::Interrupted() { DCHECK_EQ(Thread::Current(), this); // No other thread can concurrently reset the interrupted flag. - bool interrupted = tls32_.interrupted.LoadSequentiallyConsistent(); + bool interrupted = tls32_.interrupted.load(std::memory_order_seq_cst); if (interrupted) { - tls32_.interrupted.StoreSequentiallyConsistent(false); + tls32_.interrupted.store(false, std::memory_order_seq_cst); } return interrupted; } // Implements java.lang.Thread.isInterrupted. bool Thread::IsInterrupted() { - return tls32_.interrupted.LoadSequentiallyConsistent(); + return tls32_.interrupted.load(std::memory_order_seq_cst); } void Thread::Interrupt(Thread* self) { MutexLock mu(self, *wait_mutex_); - if (tls32_.interrupted.LoadSequentiallyConsistent()) { + if (tls32_.interrupted.load(std::memory_order_seq_cst)) { return; } - tls32_.interrupted.StoreSequentiallyConsistent(true); + tls32_.interrupted.store(true, std::memory_order_seq_cst); NotifyLocked(self); } diff --git a/runtime/thread.h b/runtime/thread.h index 6549fc1a1f..22b77eea64 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -263,16 +263,31 @@ class Thread { WARN_UNUSED REQUIRES(Locks::thread_suspend_count_lock_); + // Requests a checkpoint closure to run on another thread. The closure will be run when the thread + // gets suspended. This will return true if the closure was added and will (eventually) be + // executed. It returns false otherwise. + // + // Since multiple closures can be queued and some closures can delay other threads from running, + // no closure should attempt to suspend another thread while running. + // TODO We should add some debug option that verifies this. bool RequestCheckpoint(Closure* function) REQUIRES(Locks::thread_suspend_count_lock_); // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to - // execute the checkpoint for us if it is Runnable. - bool RequestSynchronousCheckpoint(Closure* function) + // execute the checkpoint for us if it is Runnable. The suspend_state is the state that the thread + // will go into while it is awaiting the checkpoint to be run.
diff --git a/runtime/thread.h b/runtime/thread.h index 6549fc1a1f..22b77eea64 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -263,16 +263,31 @@ class Thread { WARN_UNUSED REQUIRES(Locks::thread_suspend_count_lock_); + // Requests a checkpoint closure to run on another thread. The closure will be run when the thread + // gets suspended. This will return true if the closure was added and will (eventually) be + // executed. It returns false otherwise. + // + // Since multiple closures can be queued and some closures can delay other threads from running, no + // closure should attempt to suspend another thread while running. + // TODO We should add some debug option that verifies this. bool RequestCheckpoint(Closure* function) REQUIRES(Locks::thread_suspend_count_lock_); // RequestSynchronousCheckpoint releases the thread_list_lock_ as a part of its execution. This is // due to the fact that Thread::Current() needs to go to sleep to allow the targeted thread to - // execute the checkpoint for us if it is Runnable. - bool RequestSynchronousCheckpoint(Closure* function) + // execute the checkpoint for us if it is Runnable. The suspend_state is the state that the thread + // will go into while it is awaiting the checkpoint to be run. + // NB Passing ThreadState::kRunnable may cause the current thread to wait in a condition variable + // while holding the mutator_lock_. Callers should ensure that this will not cause any problems + // for the closure or the rest of the system. + // NB Since multiple closures can be queued and some closures can delay other threads from running, + // no closure should attempt to suspend another thread while running. + bool RequestSynchronousCheckpoint(Closure* function, + ThreadState suspend_state = ThreadState::kWaiting) + REQUIRES_SHARED(Locks::mutator_lock_) RELEASE(Locks::thread_list_lock_) REQUIRES(!Locks::thread_suspend_count_lock_); + bool RequestEmptyCheckpoint() REQUIRES(Locks::thread_suspend_count_lock_); @@ -541,7 +556,7 @@ class Thread { bool IsInterrupted(); void Interrupt(Thread* self) REQUIRES(!*wait_mutex_); void SetInterrupted(bool i) { - tls32_.interrupted.StoreSequentiallyConsistent(i); + tls32_.interrupted.store(i, std::memory_order_seq_cst); } void Notify() REQUIRES(!*wait_mutex_); @@ -1095,11 +1110,11 @@ class Thread { } void AtomicSetFlag(ThreadFlag flag) { - tls32_.state_and_flags.as_atomic_int.FetchAndBitwiseOrSequentiallyConsistent(flag); + tls32_.state_and_flags.as_atomic_int.fetch_or(flag, std::memory_order_seq_cst); } void AtomicClearFlag(ThreadFlag flag) { - tls32_.state_and_flags.as_atomic_int.FetchAndBitwiseAndSequentiallyConsistent(-1 ^ flag); + tls32_.state_and_flags.as_atomic_int.fetch_and(-1 ^ flag, std::memory_order_seq_cst); } void ResetQuickAllocEntryPointsForThread(bool is_marking); diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc index 8095ef57c7..44af867d60 100644 --- a/runtime/thread_list.cc +++ b/runtime/thread_list.cc @@ -732,7 +732,7 @@ void ThreadList::SuspendAllInternal(Thread* self, if (reason == SuspendReason::kForDebugger) { ++debug_suspend_all_count_; } - pending_threads.StoreRelaxed(list_.size() - num_ignored); + pending_threads.store(list_.size() - num_ignored, std::memory_order_relaxed); // Increment everybody's suspend count (except those that should be ignored). for (const auto& thread : list_) { if (thread == ignore1 || thread == ignore2) { @@ -748,7 +748,7 @@ if (thread->IsSuspended()) { // Only clear the counter for the current thread. thread->ClearSuspendBarrier(&pending_threads); - pending_threads.FetchAndSubSequentiallyConsistent(1); + pending_threads.fetch_sub(1, std::memory_order_seq_cst); } } } @@ -761,7 +761,7 @@ #endif const uint64_t start_time = NanoTime(); while (true) { - int32_t cur_val = pending_threads.LoadRelaxed(); + int32_t cur_val = pending_threads.load(std::memory_order_relaxed); if (LIKELY(cur_val > 0)) { #if ART_USE_FUTEXES if (futex(pending_threads.Address(), FUTEX_WAIT, cur_val, &wait_timeout, nullptr, 0) != 0) { diff --git a/runtime/thread_pool_test.cc b/runtime/thread_pool_test.cc index 895a108af0..d7842002ee 100644 --- a/runtime/thread_pool_test.cc +++ b/runtime/thread_pool_test.cc @@ -71,7 +71,7 @@ TEST_F(ThreadPoolTest, CheckRun) { // Wait for tasks to complete. thread_pool.Wait(self, true, false); // Make sure that we finished all the work. - EXPECT_EQ(num_tasks, count.LoadSequentiallyConsistent()); + EXPECT_EQ(num_tasks, count.load(std::memory_order_seq_cst)); } TEST_F(ThreadPoolTest, StopStart) { @@ -84,7 +84,7 @@ } usleep(200); // Check that no threads started prematurely.
- EXPECT_EQ(0, count.LoadSequentiallyConsistent()); + EXPECT_EQ(0, count.load(std::memory_order_seq_cst)); // Signal the threads to start processing tasks. thread_pool.StartWorkers(self); usleep(200); @@ -93,7 +93,7 @@ thread_pool.AddTask(self, new CountTask(&bad_count)); usleep(200); // Ensure that the task added after the workers were stopped doesn't get run. - EXPECT_EQ(0, bad_count.LoadSequentiallyConsistent()); + EXPECT_EQ(0, bad_count.load(std::memory_order_seq_cst)); // Allow tasks to finish up and delete themselves. thread_pool.StartWorkers(self); thread_pool.Wait(self, false, false); @@ -157,7 +157,7 @@ TEST_F(ThreadPoolTest, RecursiveTest) { thread_pool.AddTask(self, new TreeTask(&thread_pool, &count, depth)); thread_pool.StartWorkers(self); thread_pool.Wait(self, true, false); - EXPECT_EQ((1 << depth) - 1, count.LoadSequentiallyConsistent()); + EXPECT_EQ((1 << depth) - 1, count.load(std::memory_order_seq_cst)); } class PeerTask : public Task { diff --git a/runtime/trace.cc b/runtime/trace.cc index 0f321b6591..bea510ab61 100644 --- a/runtime/trace.cc +++ b/runtime/trace.cc @@ -319,8 +319,74 @@ void* Trace::RunSamplingThread(void* arg) { return nullptr; } -void Trace::Start(const char* trace_filename, int trace_fd, size_t buffer_size, int flags, - TraceOutputMode output_mode, TraceMode trace_mode, int interval_us) { +void Trace::Start(const char* trace_filename, + size_t buffer_size, + int flags, + TraceOutputMode output_mode, + TraceMode trace_mode, + int interval_us) { + std::unique_ptr<File> file(OS::CreateEmptyFileWriteOnly(trace_filename)); + if (file == nullptr) { + std::string msg = android::base::StringPrintf("Unable to open trace file '%s'", trace_filename); + PLOG(ERROR) << msg; + ScopedObjectAccess soa(Thread::Current()); + Thread::Current()->ThrowNewException("Ljava/lang/RuntimeException;", msg.c_str()); + return; + } + Start(std::move(file), buffer_size, flags, output_mode, trace_mode, interval_us); +} + +void Trace::Start(int trace_fd, + size_t buffer_size, + int flags, + TraceOutputMode output_mode, + TraceMode trace_mode, + int interval_us) { + if (trace_fd < 0) { + std::string msg = android::base::StringPrintf("Unable to start tracing with invalid fd %d", + trace_fd); + LOG(ERROR) << msg; + ScopedObjectAccess soa(Thread::Current()); + Thread::Current()->ThrowNewException("Ljava/lang/RuntimeException;", msg.c_str()); + return; + } + std::unique_ptr<File> file(new File(trace_fd, "tracefile")); + Start(std::move(file), buffer_size, flags, output_mode, trace_mode, interval_us); +} + +void Trace::StartDDMS(size_t buffer_size, + int flags, + TraceMode trace_mode, + int interval_us) { + Start(std::unique_ptr<File>(), + buffer_size, + flags, + TraceOutputMode::kDDMS, + trace_mode, + interval_us); +}
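The old Start() took both a file name and an fd and decided internally which to use; the three entry points above (named file, borrowed fd, DDMS) now each do their own validation and funnel into a single overload that takes ownership of an already-open File. That overload, next, guards its error paths with a unique_ptr carrying a custom deleter. Here is a freestanding sketch of the same ownership pattern, using FILE* in place of ART's File class:

    #include <cstdio>
    #include <memory>

    // Stands in for handing the file to a new Trace instance, which becomes
    // responsible for closing it.
    void TakeOwnership(std::FILE* f) {
      if (f != nullptr) {
        std::fclose(f);  // the new owner closes it when done
      }
    }

    void StartSketch(const char* path) {
      // The lambda deleter closes the file on any early return; release()
      // below disarms it once ownership has moved on.
      auto deleter = [](std::FILE* f) {
        if (f != nullptr) {
          std::fclose(f);
        }
      };
      std::unique_ptr<std::FILE, decltype(deleter)> file(std::fopen(path, "w"), deleter);
      if (file == nullptr) {
        return;  // open failed: report it; there is nothing to clean up
      }
      // ... validation that may bail out early; `file` closes automatically ...
      TakeOwnership(file.release());  // transferred: the deleter no longer runs
    }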
+ +void Trace::Start(std::unique_ptr<File>&& trace_file_in, + size_t buffer_size, + int flags, + TraceOutputMode output_mode, + TraceMode trace_mode, + int interval_us) { + // We own trace_file now and are responsible for closing it. To account for error situations, use + // a specialized unique_ptr to ensure we close it on the way out (if it hasn't been passed to a + // Trace instance). + auto deleter = [](File* file) { + if (file != nullptr) { + file->MarkUnchecked(); // Don't deal with flushing requirements. + int result ATTRIBUTE_UNUSED = file->Close(); + delete file; + } + }; + std::unique_ptr<File, decltype(deleter)> trace_file(trace_file_in.release(), deleter); + if (trace_file != nullptr) { + trace_file->DisableAutoClose(); + } + Thread* self = Thread::Current(); { MutexLock mu(self, *Locks::trace_lock_); @@ -338,23 +404,6 @@ void Trace::Start(const char* trace_filename, int trace_fd, size_t buffer_size, return; } - // Open trace file if not going directly to ddms. - std::unique_ptr<File> trace_file; - if (output_mode != TraceOutputMode::kDDMS) { - if (trace_fd < 0) { - trace_file.reset(OS::CreateEmptyFileWriteOnly(trace_filename)); - } else { - trace_file.reset(new File(trace_fd, "tracefile")); - trace_file->DisableAutoClose(); - } - if (trace_file.get() == nullptr) { - PLOG(ERROR) << "Unable to open trace file '" << trace_filename << "'"; - ScopedObjectAccess soa(self); - ThrowRuntimeException("Unable to open trace file '%s'", trace_filename); - return; - } - } - Runtime* runtime = Runtime::Current(); // Enable count of allocs if specified in the flags. @@ -372,8 +421,7 @@ void Trace::Start(const char* trace_filename, int trace_fd, size_t buffer_size, LOG(ERROR) << "Trace already in progress, ignoring this request"; } else { enable_stats = (flags & kTraceCountAllocs) != 0; - the_trace_ = new Trace(trace_file.release(), trace_filename, buffer_size, flags, output_mode, - trace_mode); + the_trace_ = new Trace(trace_file.release(), buffer_size, flags, output_mode, trace_mode); if (trace_mode == TraceMode::kSampling) { CHECK_PTHREAD_CALL(pthread_create, (&sampling_pthread_, nullptr, &RunSamplingThread, reinterpret_cast<void*>(interval_us)), @@ -595,8 +643,11 @@ TracingMode Trace::GetMethodTracingMode() { static constexpr size_t kMinBufSize = 18U; // Trace header is up to 18B. -Trace::Trace(File* trace_file, const char* trace_name, size_t buffer_size, int flags, - TraceOutputMode output_mode, TraceMode trace_mode) +Trace::Trace(File* trace_file, + size_t buffer_size, + int flags, + TraceOutputMode output_mode, + TraceMode trace_mode) : trace_file_(trace_file), buf_(new uint8_t[std::max(kMinBufSize, buffer_size)]()), flags_(flags), trace_output_mode_(output_mode), trace_mode_(trace_mode), @@ -605,6 +656,8 @@ Trace::Trace(File* trace_file, const char* trace_name, size_t buffer_size, int f start_time_(MicroTime()), clock_overhead_ns_(GetClockOverheadNanoSeconds()), cur_offset_(0), overflow_(false), interval_us_(0), streaming_lock_(nullptr), unique_methods_lock_(new Mutex("unique methods lock", kTracingUniqueMethodsLock)) { + CHECK(trace_file != nullptr || output_mode == TraceOutputMode::kDDMS); + uint16_t trace_version = GetTraceVersion(clock_source_); if (output_mode == TraceOutputMode::kStreaming) { trace_version |= 0xF0U; @@ -622,10 +675,9 @@ static_assert(18 <= kMinBufSize, "Minimum buffer size not large enough for trace header"); // Update current offset. - cur_offset_.StoreRelaxed(kTraceHeaderLength); + cur_offset_.store(kTraceHeaderLength, std::memory_order_relaxed); if (output_mode == TraceOutputMode::kStreaming) { - streaming_file_name_ = trace_name; streaming_lock_ = new Mutex("tracing lock", LockLevel::kTracingStreamingLock); seen_threads_.reset(new ThreadIDBitSet()); } @@ -665,7 +717,7 @@ void Trace::FinishTracing() { // Clean up.
STLDeleteValues(&seen_methods_); } else { - final_offset = cur_offset_.LoadRelaxed(); + final_offset = cur_offset_.load(std::memory_order_relaxed); GetVisitedMethods(final_offset, &visited_methods); } @@ -892,7 +944,7 @@ std::string Trace::GetMethodLine(ArtMethod* method) { } void Trace::WriteToBuf(const uint8_t* src, size_t src_size) { - int32_t old_offset = cur_offset_.LoadRelaxed(); + int32_t old_offset = cur_offset_.load(std::memory_order_relaxed); int32_t new_offset = old_offset + static_cast<int32_t>(src_size); if (dchecked_integral_cast<size_t>(new_offset) > buffer_size_) { // Flush buffer. @@ -905,24 +957,24 @@ if (!trace_file_->WriteFully(src, src_size)) { PLOG(WARNING) << "Failed to stream a tracing event."; } - cur_offset_.StoreRelease(0); // Buffer is empty now. + cur_offset_.store(0, std::memory_order_release); // Buffer is empty now. return; } old_offset = 0; new_offset = static_cast<int32_t>(src_size); } - cur_offset_.StoreRelease(new_offset); + cur_offset_.store(new_offset, std::memory_order_release); // Fill in data. memcpy(buf_.get() + old_offset, src, src_size); } void Trace::FlushBuf() { - int32_t offset = cur_offset_.LoadRelaxed(); + int32_t offset = cur_offset_.load(std::memory_order_relaxed); if (!trace_file_->WriteFully(buf_.get(), offset)) { PLOG(WARNING) << "Failed to flush the remaining data in streaming mode."; } - cur_offset_.StoreRelease(0); + cur_offset_.store(0, std::memory_order_release); } void Trace::LogMethodTraceEvent(Thread* thread, ArtMethod* method, @@ -938,7 +990,7 @@ void Trace::LogMethodTraceEvent(Thread* thread, ArtMethod* method, // We do a busy loop here trying to acquire the next offset. if (trace_output_mode_ != TraceOutputMode::kStreaming) { do { - old_offset = cur_offset_.LoadRelaxed(); + old_offset = cur_offset_.load(std::memory_order_relaxed); new_offset = old_offset + GetRecordSize(clock_source_); if (static_cast<size_t>(new_offset) > buffer_size_) { overflow_ = true;
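In streaming mode, WriteToBuf() is a reserve-then-fill writer: it claims a byte range by publishing the bumped offset, copies the payload in, and lets the flush paths republish offset zero with a release store once the buffer has been drained to the file. A freestanding sketch of the reserve-then-fill step for a single writer (simplified; the real code flushes to the trace file and retries instead of failing on overflow):

    #include <atomic>
    #include <cstdint>
    #include <cstring>

    constexpr size_t kBufSize = 4096;
    uint8_t buf[kBufSize];
    std::atomic<int32_t> cur_offset{0};

    // Single-writer append, mirroring Trace::WriteToBuf() above: claim the
    // range by publishing the new offset, then copy the payload into it.
    // With one writer a relaxed load of the offset is sufficient; the store
    // uses release ordering to match the flush path, which publishes offset
    // zero after draining the buffer.
    bool WriteToBuf(const uint8_t* src, size_t src_size) {
      int32_t old_offset = cur_offset.load(std::memory_order_relaxed);
      int32_t new_offset = old_offset + static_cast<int32_t>(src_size);
      if (static_cast<size_t>(new_offset) > kBufSize) {
        return false;  // out of space: the real code flushes to disk here
      }
      cur_offset.store(new_offset, std::memory_order_release);
      std::memcpy(buf + old_offset, src, src_size);
      return true;
    }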
diff --git a/runtime/trace.h b/runtime/trace.h index 86b8d00d51..7171f759c9 100644 --- a/runtime/trace.h +++ b/runtime/trace.h @@ -33,6 +33,10 @@ #include "globals.h" #include "instrumentation.h" +namespace unix_file { +class FdFile; +} // namespace unix_file + namespace art { class ArtField; @@ -115,10 +119,37 @@ class Trace FINAL : public instrumentation::InstrumentationListener { static void SetDefaultClockSource(TraceClockSource clock_source); - static void Start(const char* trace_filename, int trace_fd, size_t buffer_size, int flags, - TraceOutputMode output_mode, TraceMode trace_mode, int interval_us) + static void Start(const char* trace_filename, + size_t buffer_size, + int flags, + TraceOutputMode output_mode, + TraceMode trace_mode, + int interval_us) + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, + !Locks::trace_lock_); + static void Start(int trace_fd, + size_t buffer_size, + int flags, + TraceOutputMode output_mode, + TraceMode trace_mode, + int interval_us) + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, + !Locks::trace_lock_); + static void Start(std::unique_ptr<unix_file::FdFile>&& file, + size_t buffer_size, + int flags, + TraceOutputMode output_mode, + TraceMode trace_mode, + int interval_us) + REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, + !Locks::trace_lock_); + static void StartDDMS(size_t buffer_size, + int flags, + TraceMode trace_mode, + int interval_us) REQUIRES(!Locks::mutator_lock_, !Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_, !Locks::trace_lock_); + static void Pause() REQUIRES(!Locks::trace_lock_, !Locks::thread_list_lock_); static void Resume() REQUIRES(!Locks::trace_lock_); @@ -212,8 +243,11 @@ class Trace FINAL : public instrumentation::InstrumentationListener { static bool IsTracingEnabled() REQUIRES(!Locks::trace_lock_); private: - Trace(File* trace_file, const char* trace_name, size_t buffer_size, int flags, - TraceOutputMode output_mode, TraceMode trace_mode); + Trace(File* trace_file, + size_t buffer_size, + int flags, + TraceOutputMode output_mode, + TraceMode trace_mode); // The sampling interval in microseconds is passed as an argument. static void* RunSamplingThread(void* arg) REQUIRES(!Locks::trace_lock_); @@ -318,7 +352,6 @@ class Trace FINAL : public instrumentation::InstrumentationListener { int interval_us_; // Streaming mode data. - std::string streaming_file_name_; Mutex* streaming_lock_; std::map<const DexFile*, DexIndexBitSet*> seen_methods_; std::unique_ptr<ThreadIDBitSet> seen_threads_; diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index b07001e595..cee717610d 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -2765,47 +2765,61 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { break; case Instruction::IGET_BOOLEAN: + case Instruction::IGET_BOOLEAN_QUICK: VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Boolean(), true, false); break; case Instruction::IGET_BYTE: + case Instruction::IGET_BYTE_QUICK: VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Byte(), true, false); break; case Instruction::IGET_CHAR: + case Instruction::IGET_CHAR_QUICK: VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Char(), true, false); break; case Instruction::IGET_SHORT: + case Instruction::IGET_SHORT_QUICK: VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Short(), true, false); break; case Instruction::IGET: + case Instruction::IGET_QUICK: VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Integer(), true, false); break; case Instruction::IGET_WIDE: + case Instruction::IGET_WIDE_QUICK: VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.LongLo(), true, false); break; case Instruction::IGET_OBJECT: + case Instruction::IGET_OBJECT_QUICK: VerifyISFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.JavaLangObject(false), false, false); break; case Instruction::IPUT_BOOLEAN: + case Instruction::IPUT_BOOLEAN_QUICK: VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Boolean(), true, false); break; case Instruction::IPUT_BYTE: + case Instruction::IPUT_BYTE_QUICK: VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Byte(), true, false); break; case Instruction::IPUT_CHAR: + case Instruction::IPUT_CHAR_QUICK: VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Char(), true, false); break; case Instruction::IPUT_SHORT: + case Instruction::IPUT_SHORT_QUICK: VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Short(), true, false); break; case Instruction::IPUT: + case Instruction::IPUT_QUICK: VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Integer(), true, false); break; case Instruction::IPUT_WIDE: + case Instruction::IPUT_WIDE_QUICK: VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.LongLo(), true, false); break; case Instruction::IPUT_OBJECT: + case
Instruction::IPUT_OBJECT_QUICK: VerifyISFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.JavaLangObject(false), false, false); break; @@ -2859,9 +2873,12 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { case Instruction::INVOKE_VIRTUAL: case Instruction::INVOKE_VIRTUAL_RANGE: case Instruction::INVOKE_SUPER: - case Instruction::INVOKE_SUPER_RANGE: { + case Instruction::INVOKE_SUPER_RANGE: + case Instruction::INVOKE_VIRTUAL_QUICK: + case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: { bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE || - inst->Opcode() == Instruction::INVOKE_SUPER_RANGE); + inst->Opcode() == Instruction::INVOKE_SUPER_RANGE || + inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK); bool is_super = (inst->Opcode() == Instruction::INVOKE_SUPER || inst->Opcode() == Instruction::INVOKE_SUPER_RANGE); MethodType type = is_super ? METHOD_SUPER : METHOD_VIRTUAL; @@ -2881,7 +2898,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { } } if (return_type == nullptr) { - uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c(); + uint32_t method_idx = GetMethodIdxOfInvoke(inst); const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx); dex::TypeIndex return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_; @@ -3368,67 +3385,6 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) { } } break; - // Note: the following instructions encode offsets derived from class linking. - // As such they use Class*/Field*/Executable* as these offsets only have - // meaning if the class linking and resolution were successful. - case Instruction::IGET_QUICK: - VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Integer(), true); - break; - case Instruction::IGET_WIDE_QUICK: - VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.LongLo(), true); - break; - case Instruction::IGET_OBJECT_QUICK: - VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.JavaLangObject(false), false); - break; - case Instruction::IGET_BOOLEAN_QUICK: - VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Boolean(), true); - break; - case Instruction::IGET_BYTE_QUICK: - VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Byte(), true); - break; - case Instruction::IGET_CHAR_QUICK: - VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Char(), true); - break; - case Instruction::IGET_SHORT_QUICK: - VerifyQuickFieldAccess<FieldAccessType::kAccGet>(inst, reg_types_.Short(), true); - break; - case Instruction::IPUT_QUICK: - VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Integer(), true); - break; - case Instruction::IPUT_BOOLEAN_QUICK: - VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Boolean(), true); - break; - case Instruction::IPUT_BYTE_QUICK: - VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Byte(), true); - break; - case Instruction::IPUT_CHAR_QUICK: - VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Char(), true); - break; - case Instruction::IPUT_SHORT_QUICK: - VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.Short(), true); - break; - case Instruction::IPUT_WIDE_QUICK: - VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.LongLo(), true); - break; - case Instruction::IPUT_OBJECT_QUICK: - VerifyQuickFieldAccess<FieldAccessType::kAccPut>(inst, reg_types_.JavaLangObject(false), false); - break; - case 
Instruction::INVOKE_VIRTUAL_QUICK: - case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: { - bool is_range = (inst->Opcode() == Instruction::INVOKE_VIRTUAL_RANGE_QUICK); - ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range); - if (called_method != nullptr) { - const char* descriptor = called_method->GetReturnTypeDescriptor(); - const RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false); - if (!return_type.IsLowHalf()) { - work_line_->SetResultRegisterType(this, return_type); - } else { - work_line_->SetResultRegisterTypeWide(return_type, return_type.HighHalf(®_types_)); - } - just_set_result = true; - } - break; - } /* These should never appear during verification. */ case Instruction::UNUSED_3E ... Instruction::UNUSED_43: @@ -3995,7 +3951,7 @@ ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator( } } else { // Check whether the name of the called method is "<init>" - const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c(); + const uint32_t method_idx = GetMethodIdxOfInvoke(inst); if (strcmp(dex_file_->GetMethodName(dex_file_->GetMethodId(method_idx)), "<init>") != 0) { Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'this' arg must be initialized"; return nullptr; @@ -4017,7 +3973,7 @@ ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator( res_method_class = &FromClass(klass->GetDescriptor(&temp), klass, klass->CannotBeAssignedFromOtherTypes()); } else { - const uint32_t method_idx = inst->VRegB(); + const uint32_t method_idx = GetMethodIdxOfInvoke(inst); const dex::TypeIndex class_idx = dex_file_->GetMethodId(method_idx).class_idx_; res_method_class = ®_types_.FromDescriptor( GetClassLoader(), @@ -4108,7 +4064,7 @@ void MethodVerifier::VerifyInvocationArgsUnresolvedMethod(const Instruction* ins // As the method may not have been resolved, make this static check against what we expect. // The main reason for this code block is to fail hard when we find an illegal use, e.g., // wrong number of arguments or wrong primitive types, even if the method could not be resolved. - const uint32_t method_idx = inst->VRegB(); + const uint32_t method_idx = GetMethodIdxOfInvoke(inst); DexFileParameterIterator it(*dex_file_, dex_file_->GetProtoId(dex_file_->GetMethodId(method_idx).proto_idx_)); VerifyInvocationArgsFromIterator(&it, inst, method_type, is_range, nullptr); @@ -4181,7 +4137,7 @@ ArtMethod* MethodVerifier::VerifyInvocationArgs( const Instruction* inst, MethodType method_type, bool is_range) { // Resolve the method. This could be an abstract or concrete method depending on what sort of call // we're making. - const uint32_t method_idx = inst->VRegB(); + const uint32_t method_idx = GetMethodIdxOfInvoke(inst); ArtMethod* res_method = ResolveMethodAndCheckAccess(method_idx, method_type); if (res_method == nullptr) { // error or class is unresolved // Check what we can statically. 
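The verifier hunks above fold the quickened invoke opcodes into the regular invoke paths by replacing direct inst->VRegB() / inst->VRegB_35c() / inst->VRegB_3rc() reads with GetMethodIdxOfInvoke(), whose definition appears further down in this diff: quickened instructions had their method index rewritten away, so it must be recovered from the quickening info recorded for that dex pc. A simplified, hypothetical sketch of that shape (the types and side-table lookup are illustrative stand-ins, not ART's API):

    #include <cstdint>
    #include <unordered_map>

    enum class Opcode { kInvokeVirtual, kInvokeVirtualQuick /* ... */ };

    struct Insn {
      Opcode opcode;
      uint16_t vreg_b;  // method index operand of a non-quickened invoke
    };

    // Hypothetical side table: dex pc -> original index saved by the quickener.
    using QuickeningInfo = std::unordered_map<uint32_t, uint16_t>;

    uint16_t MethodIdxOfInvoke(const Insn& insn, uint32_t dex_pc,
                               const QuickeningInfo& quickening) {
      switch (insn.opcode) {
        case Opcode::kInvokeVirtualQuick:
          // The quickener stripped the index out of the bytecode; map the
          // instruction's dex pc back to the index it recorded.
          return quickening.at(dex_pc);
        default:
          // Regular invokes carry the method index in vB.
          return insn.vreg_b;
      }
    }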
@@ -4334,122 +4290,34 @@ bool MethodVerifier::CheckSignaturePolymorphicReceiver(const Instruction* inst) return true; } -ArtMethod* MethodVerifier::GetQuickInvokedMethod(const Instruction* inst, bool is_range) { - if (is_range) { - DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_VIRTUAL_RANGE_QUICK); - } else { - DCHECK_EQ(inst->Opcode(), Instruction::INVOKE_VIRTUAL_QUICK); - } - - DCHECK(method_being_verified_ != nullptr); - uint16_t method_idx = method_being_verified_->GetIndexFromQuickening(work_insn_idx_); - CHECK_NE(method_idx, DexFile::kDexNoIndex16); - return ResolveMethodAndCheckAccess(method_idx, METHOD_VIRTUAL); -} - -ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range) { - DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_) - << dex_file_->PrettyMethod(dex_method_idx_, true) << "@" << work_insn_idx_; - - ArtMethod* res_method = GetQuickInvokedMethod(inst, is_range); - if (res_method == nullptr) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer method from " << inst->Name(); - return nullptr; - } - if (FailOrAbort(!res_method->IsDirect(), - "Quick-invoked method is direct at ", - work_insn_idx_)) { - return nullptr; - } - if (FailOrAbort(!res_method->IsStatic(), - "Quick-invoked method is static at ", - work_insn_idx_)) { - return nullptr; - } - - // We use vAA as our expected arg count, rather than res_method->insSize, because we need to - // match the call to the signature. Also, we might be calling through an abstract method - // definition (which doesn't have register count values). - const RegType& actual_arg_type = work_line_->GetInvocationThis(this, inst); - if (actual_arg_type.IsConflict()) { // GetInvocationThis failed. - return nullptr; - } - const size_t expected_args = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c(); - /* caught by static verifier */ - DCHECK(is_range || expected_args <= 5); - if (expected_args > code_item_accessor_.OutsSize()) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid argument count (" << expected_args - << ") exceeds outsSize (" << code_item_accessor_.OutsSize() << ")"; - return nullptr; - } - - /* - * Check the "this" argument, which must be an instance of the class that declared the method. - * For an interface class, we don't do the full interface merge (see JoinClass), so we can't do a - * rigorous check here (which is okay since we have to do it at runtime). - */ - // Note: given an uninitialized type, this should always fail. Constructors aren't virtual. - if (actual_arg_type.IsUninitializedTypes() && !res_method->IsConstructor()) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "'this' arg must be initialized"; - return nullptr; - } - if (!actual_arg_type.IsZeroOrNull()) { - mirror::Class* klass = res_method->GetDeclaringClass(); - std::string temp; - const RegType& res_method_class = - FromClass(klass->GetDescriptor(&temp), klass, klass->CannotBeAssignedFromOtherTypes()); - if (!res_method_class.IsAssignableFrom(actual_arg_type, this)) { - Fail(actual_arg_type.IsUninitializedTypes() // Just overcautious - should have never - ? VERIFY_ERROR_BAD_CLASS_HARD // quickened this. - : actual_arg_type.IsUnresolvedTypes() - ? VERIFY_ERROR_NO_CLASS - : VERIFY_ERROR_BAD_CLASS_SOFT) << "'this' argument '" << actual_arg_type - << "' not instance of '" << res_method_class << "'"; - return nullptr; - } - } - /* - * Process the target method's signature. This signature may or may not - * have been verified, so we can't assume it's properly formed. 
- */ - const DexFile::TypeList* params = res_method->GetParameterTypeList(); - size_t params_size = params == nullptr ? 0 : params->Size(); - uint32_t arg[5]; - if (!is_range) { - inst->GetVarArgs(arg); - } - size_t actual_args = 1; - for (size_t param_index = 0; param_index < params_size; param_index++) { - if (actual_args >= expected_args) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invalid call to '" - << res_method->PrettyMethod() - << "'. Expected " << expected_args - << " arguments, processing argument " << actual_args - << " (where longs/doubles count twice)."; - return nullptr; - } - const char* descriptor = - res_method->GetTypeDescriptorFromTypeIdx(params->GetTypeItem(param_index).type_idx_); - if (descriptor == nullptr) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invocation of " - << res_method->PrettyMethod() - << " missing signature component"; - return nullptr; +uint16_t MethodVerifier::GetMethodIdxOfInvoke(const Instruction* inst) { + switch (inst->Opcode()) { + case Instruction::INVOKE_VIRTUAL_RANGE_QUICK: + case Instruction::INVOKE_VIRTUAL_QUICK: { + DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_) + << dex_file_->PrettyMethod(dex_method_idx_, true) << "@" << work_insn_idx_; + DCHECK(method_being_verified_ != nullptr); + uint16_t method_idx = method_being_verified_->GetIndexFromQuickening(work_insn_idx_); + CHECK_NE(method_idx, DexFile::kDexNoIndex16); + return method_idx; } - const RegType& reg_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false); - uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args]; - if (!work_line_->VerifyRegisterType(this, get_reg, reg_type)) { - return res_method; + default: { + return inst->VRegB(); } - actual_args = reg_type.IsLongOrDoubleTypes() ? actual_args + 2 : actual_args + 1; } - if (actual_args != expected_args) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Rejecting invocation of " - << res_method->PrettyMethod() << " expected " - << expected_args << " arguments, found " << actual_args; - return nullptr; +} + +uint16_t MethodVerifier::GetFieldIdxOfFieldAccess(const Instruction* inst, bool is_static) { + if (is_static) { + return inst->VRegB_21c(); + } else if (inst->IsQuickened()) { + DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_); + DCHECK(method_being_verified_ != nullptr); + uint16_t field_idx = method_being_verified_->GetIndexFromQuickening(work_insn_idx_); + CHECK_NE(field_idx, DexFile::kDexNoIndex16); + return field_idx; } else { - return res_method; + return inst->VRegC_22c(); } } @@ -4819,7 +4687,7 @@ ArtField* MethodVerifier::GetInstanceField(const RegType& obj_type, int field_id template <MethodVerifier::FieldAccessType kAccType> void MethodVerifier::VerifyISFieldAccess(const Instruction* inst, const RegType& insn_type, bool is_primitive, bool is_static) { - uint32_t field_idx = is_static ? 
inst->VRegB_21c() : inst->VRegC_22c(); + uint32_t field_idx = GetFieldIdxOfFieldAccess(inst, is_static); ArtField* field; if (is_static) { field = GetStaticField(field_idx); @@ -4972,151 +4840,6 @@ void MethodVerifier::VerifyISFieldAccess(const Instruction* inst, const RegType& } } -ArtField* MethodVerifier::GetQuickAccessedField() { - DCHECK(method_being_verified_ != nullptr); - uint16_t field_idx = method_being_verified_->GetIndexFromQuickening(work_insn_idx_); - CHECK_NE(field_idx, DexFile::kDexNoIndex16); - ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); - ArtField* field = class_linker->ResolveFieldJLS(field_idx, dex_cache_, class_loader_); - if (field == nullptr) { - DCHECK(self_->IsExceptionPending()); - self_->ClearException(); - } - return field; -} - -template <MethodVerifier::FieldAccessType kAccType> -void MethodVerifier::VerifyQuickFieldAccess(const Instruction* inst, const RegType& insn_type, - bool is_primitive) { - DCHECK(Runtime::Current()->IsStarted() || verify_to_dump_); - - ArtField* field = GetQuickAccessedField(); - if (field == nullptr) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field from " << inst->Name(); - return; - } - - // For an IPUT_QUICK, we now test for final flag of the field. - if (kAccType == FieldAccessType::kAccPut) { - if (field->IsFinal() && field->GetDeclaringClass() != GetDeclaringClass().GetClass()) { - Fail(VERIFY_ERROR_ACCESS_FIELD) << "cannot modify final field " << field->PrettyField() - << " from other class " << GetDeclaringClass(); - return; - } - } - - // Get the field type. - const RegType* field_type; - { - ObjPtr<mirror::Class> field_type_class = - can_load_classes_ ? field->ResolveType() : field->LookupResolvedType(); - - if (field_type_class != nullptr) { - field_type = &FromClass(field->GetTypeDescriptor(), - field_type_class.Ptr(), - field_type_class->CannotBeAssignedFromOtherTypes()); - } else { - Thread* self = Thread::Current(); - DCHECK(!can_load_classes_ || self->IsExceptionPending()); - self->ClearException(); - field_type = ®_types_.FromDescriptor(field->GetDeclaringClass()->GetClassLoader(), - field->GetTypeDescriptor(), - false); - } - if (field_type == nullptr) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot infer field type from " << inst->Name(); - return; - } - } - - const uint32_t vregA = inst->VRegA_22c(); - static_assert(kAccType == FieldAccessType::kAccPut || kAccType == FieldAccessType::kAccGet, - "Unexpected third access type"); - if (kAccType == FieldAccessType::kAccPut) { - if (is_primitive) { - // Primitive field assignability rules are weaker than regular assignability rules - bool instruction_compatible; - bool value_compatible; - const RegType& value_type = work_line_->GetRegisterType(this, vregA); - if (field_type->IsIntegralTypes()) { - instruction_compatible = insn_type.IsIntegralTypes(); - value_compatible = value_type.IsIntegralTypes(); - } else if (field_type->IsFloat()) { - instruction_compatible = insn_type.IsInteger(); // no [is]put-float, so expect [is]put-int - value_compatible = value_type.IsFloatTypes(); - } else if (field_type->IsLong()) { - instruction_compatible = insn_type.IsLong(); - value_compatible = value_type.IsLongTypes(); - } else if (field_type->IsDouble()) { - instruction_compatible = insn_type.IsLong(); // no [is]put-double, so expect [is]put-long - value_compatible = value_type.IsDoubleTypes(); - } else { - instruction_compatible = false; // reference field with primitive store - value_compatible = false; // unused - } - if (!instruction_compatible) 
{ - // This is a global failure rather than a class change failure as the instructions and - // the descriptors for the type should have been consistent within the same file at - // compile time - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << ArtField::PrettyField(field) - << " to be of type '" << insn_type - << "' but found type '" << *field_type - << "' in put"; - return; - } - if (!value_compatible) { - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "unexpected value in v" << vregA - << " of type " << value_type - << " but expected " << *field_type - << " for store to " << ArtField::PrettyField(field) << " in put"; - return; - } - } else { - if (!insn_type.IsAssignableFrom(*field_type, this)) { - Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << ArtField::PrettyField(field) - << " to be compatible with type '" << insn_type - << "' but found type '" << *field_type - << "' in put-object"; - return; - } - work_line_->VerifyRegisterType(this, vregA, *field_type); - } - } else if (kAccType == FieldAccessType::kAccGet) { - if (is_primitive) { - if (field_type->Equals(insn_type) || - (field_type->IsFloat() && insn_type.IsIntegralTypes()) || - (field_type->IsDouble() && insn_type.IsLongTypes())) { - // expected that read is of the correct primitive type or that int reads are reading - // floats or long reads are reading doubles - } else { - // This is a global failure rather than a class change failure as the instructions and - // the descriptors for the type should have been consistent within the same file at - // compile time - Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "expected field " << ArtField::PrettyField(field) - << " to be of type '" << insn_type - << "' but found type '" << *field_type << "' in Get"; - return; - } - } else { - if (!insn_type.IsAssignableFrom(*field_type, this)) { - Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "expected field " << ArtField::PrettyField(field) - << " to be compatible with type '" << insn_type - << "' but found type '" << *field_type - << "' in get-object"; - work_line_->SetRegisterType<LockOp::kClear>(this, vregA, reg_types_.Conflict()); - return; - } - } - if (!field_type->IsLowHalf()) { - work_line_->SetRegisterType<LockOp::kClear>(this, vregA, *field_type); - } else { - work_line_->SetRegisterTypeWide(this, vregA, *field_type, field_type->HighHalf(®_types_)); - } - } else { - LOG(FATAL) << "Unexpected case."; - } -} - bool MethodVerifier::CheckNotMoveException(const uint16_t* insns, int insn_idx) { if ((insns[insn_idx] & 0xff) == Instruction::MOVE_EXCEPTION) { Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid use of move-exception"; diff --git a/runtime/verifier/method_verifier.h b/runtime/verifier/method_verifier.h index 9237a8b44b..531d3dabfa 100644 --- a/runtime/verifier/method_verifier.h +++ b/runtime/verifier/method_verifier.h @@ -209,12 +209,12 @@ class MethodVerifier { const RegType& ResolveCheckedClass(dex::TypeIndex class_idx) REQUIRES_SHARED(Locks::mutator_lock_); - // Returns the method of a quick invoke or null if it cannot be found. - ArtMethod* GetQuickInvokedMethod(const Instruction* inst, bool is_range) + // Returns the method index of an invoke instruction. + uint16_t GetMethodIdxOfInvoke(const Instruction* inst) + REQUIRES_SHARED(Locks::mutator_lock_); + // Returns the field index of a field access instruction. + uint16_t GetFieldIdxOfFieldAccess(const Instruction* inst, bool is_static) REQUIRES_SHARED(Locks::mutator_lock_); - // Returns the access field of a quick field access (iget/iput-quick) or null - // if it cannot be found. 
- ArtField* GetQuickAccessedField() REQUIRES_SHARED(Locks::mutator_lock_); uint32_t GetEncounteredFailureTypes() { return encountered_failure_types_; @@ -575,10 +575,6 @@ class MethodVerifier { bool is_primitive, bool is_static) REQUIRES_SHARED(Locks::mutator_lock_); - template <FieldAccessType kAccType> - void VerifyQuickFieldAccess(const Instruction* inst, const RegType& insn_type, bool is_primitive) - REQUIRES_SHARED(Locks::mutator_lock_); - enum class CheckAccess { // private. kYes, kNo, @@ -642,9 +638,6 @@ class MethodVerifier { ArtMethod* res_method) REQUIRES_SHARED(Locks::mutator_lock_); - ArtMethod* VerifyInvokeVirtualQuickArgs(const Instruction* inst, bool is_range) - REQUIRES_SHARED(Locks::mutator_lock_); - /* * Verify the arguments present for a call site. Returns "true" if all is well, "false" otherwise. */ diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc index 67ea64be74..bf36ccf0fa 100644 --- a/runtime/well_known_classes.cc +++ b/runtime/well_known_classes.cc @@ -24,6 +24,7 @@ #include <android-base/stringprintf.h> #include "entrypoints/quick/quick_entrypoints_enum.h" +#include "hidden_api.h" #include "jni_internal.h" #include "mirror/class.h" #include "mirror/throwable.h" @@ -287,17 +288,17 @@ class ScopedHiddenApiExemption { public: explicit ScopedHiddenApiExemption(Runtime* runtime) : runtime_(runtime), - initially_enabled_(runtime_->AreHiddenApiChecksEnabled()) { - runtime_->SetHiddenApiChecksEnabled(false); + initial_policy_(runtime_->GetHiddenApiEnforcementPolicy()) { + runtime_->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kNoChecks); } ~ScopedHiddenApiExemption() { - runtime_->SetHiddenApiChecksEnabled(initially_enabled_); + runtime_->SetHiddenApiEnforcementPolicy(initial_policy_); } private: Runtime* runtime_; - const bool initially_enabled_; + const hiddenapi::EnforcementPolicy initial_policy_; DISALLOW_COPY_AND_ASSIGN(ScopedHiddenApiExemption); }; diff --git a/test/121-modifiers/classes/A$B.class b/test/121-modifiers/classes/A$B.class Binary files differdeleted file mode 100644 index bd7ebfe11d..0000000000 --- a/test/121-modifiers/classes/A$B.class +++ /dev/null diff --git a/test/121-modifiers/classes/A$C.class b/test/121-modifiers/classes/A$C.class Binary files differdeleted file mode 100644 index 3ae872e356..0000000000 --- a/test/121-modifiers/classes/A$C.class +++ /dev/null diff --git a/test/121-modifiers/classes/A.class b/test/121-modifiers/classes/A.class Binary files differdeleted file mode 100644 index d89d029796..0000000000 --- a/test/121-modifiers/classes/A.class +++ /dev/null diff --git a/test/121-modifiers/classes/Inf.class b/test/121-modifiers/classes/Inf.class Binary files differdeleted file mode 100644 index e8dd68029d..0000000000 --- a/test/121-modifiers/classes/Inf.class +++ /dev/null diff --git a/test/121-modifiers/classes/Main.class b/test/121-modifiers/classes/Main.class Binary files differdeleted file mode 100644 index e044074269..0000000000 --- a/test/121-modifiers/classes/Main.class +++ /dev/null diff --git a/test/121-modifiers/classes/NonInf.class b/test/121-modifiers/classes/NonInf.class Binary files differdeleted file mode 100644 index 0f1e826fb7..0000000000 --- a/test/121-modifiers/classes/NonInf.class +++ /dev/null diff --git a/test/121-modifiers/info.txt b/test/121-modifiers/info.txt index 335df53f3d..7dba1133d1 100644 --- a/test/121-modifiers/info.txt +++ b/test/121-modifiers/info.txt @@ -10,9 +10,9 @@ Finally, compile with jack/jill or dx, and run baksmali. 
javac Inf.java NonInf.java Main.java javac -cp asm.jar:asm-tree.jar:. Asm.java java -cp asm.jar:asm-tree.jar:. Asm -mv Inf.out classes/Inf.class -mv NonInf.out classes/NonInf.class -mv Main.class A.class A\$B.class A\$C.class classes/ +mv Inf.out classes_tmp/Inf.class +mv NonInf.out classes_tmp/NonInf.class +mv Main.class A.class A\$B.class A\$C.class classes_tmp/ dx --debug --dex --output=classes.dex classes baksmali disassemble classes.dex mv out/*.smali smali/ diff --git a/test/161-final-abstract-class/smali/Main.smali b/test/161-final-abstract-class/smali/Main.smali new file mode 100644 index 0000000000..588854cf52 --- /dev/null +++ b/test/161-final-abstract-class/smali/Main.smali @@ -0,0 +1,214 @@ +# Created with baksmali. + +# Java file for reference. + +# import java.lang.reflect.InvocationTargetException; +# import java.lang.reflect.Method; +# +# public class Main { +# public static void main(String[] args) { +# try { +# // Make sure that the abstract final class is marked as erroneous. +# Class.forName("AbstractFinal"); +# System.out.println("UNREACHABLE!"); +# } catch (VerifyError expected) { +# } catch (Throwable t) { +# t.printStackTrace(System.out); +# } +# try { +# // Verification of TestClass.test() used to crash when processing +# // the final abstract (erroneous) class. +# Class<?> tc = Class.forName("TestClass"); +# Method test = tc.getDeclaredMethod("test"); +# test.invoke(null); +# System.out.println("UNREACHABLE!"); +# } catch (InvocationTargetException ite) { +# if (ite.getCause() instanceof InstantiationError) { +# System.out.println( +# ite.getCause().getClass().getName() + ": " + ite.getCause().getMessage()); +# } else { +# ite.printStackTrace(System.out); +# } +# } catch (Throwable t) { +# t.printStackTrace(System.out); +# } +# } +# } + +.class public LMain; +.super Ljava/lang/Object; +.source "Main.java" + + +# direct methods +.method public constructor <init>()V + .registers 1 + + .line 20 + invoke-direct {p0}, Ljava/lang/Object;-><init>()V + + return-void +.end method + +.method public static main([Ljava/lang/String;)V + .registers 4 + + .line 24 + :try_start_0 + const-string p0, "AbstractFinal" + + invoke-static {p0}, Ljava/lang/Class;->forName(Ljava/lang/String;)Ljava/lang/Class; + + .line 25 + sget-object p0, Ljava/lang/System;->out:Ljava/io/PrintStream; + + const-string v0, "UNREACHABLE!" + + invoke-virtual {p0, v0}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V + :try_end_c + .catch Ljava/lang/VerifyError; {:try_start_0 .. :try_end_c} :catch_14 + .catch Ljava/lang/Throwable; {:try_start_0 .. 
:try_end_c} :catch_d + + goto :goto_15 + + .line 27 + :catch_d + move-exception p0 + + .line 28 + sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream; + + invoke-virtual {p0, v0}, Ljava/lang/Throwable;->printStackTrace(Ljava/io/PrintStream;)V + + goto :goto_16 + + .line 26 + :catch_14 + move-exception p0 + + .line 29 + :goto_15 + nop + + .line 33 + :goto_16 + :try_start_16 + const-string p0, "TestClass" + + invoke-static {p0}, Ljava/lang/Class;->forName(Ljava/lang/String;)Ljava/lang/Class; + + move-result-object p0 + + .line 34 + const-string v0, "test" + + const/4 v1, 0x0 + + new-array v2, v1, [Ljava/lang/Class; + + invoke-virtual {p0, v0, v2}, Ljava/lang/Class;->getDeclaredMethod(Ljava/lang/String;[Ljava/lang/Class;)Ljava/lang/reflect/Method; + + move-result-object p0 + + .line 35 + const/4 v0, 0x0 + + new-array v1, v1, [Ljava/lang/Object; + + invoke-virtual {p0, v0, v1}, Ljava/lang/reflect/Method;->invoke(Ljava/lang/Object;[Ljava/lang/Object;)Ljava/lang/Object; + + .line 36 + sget-object p0, Ljava/lang/System;->out:Ljava/io/PrintStream; + + const-string v0, "UNREACHABLE!" + + invoke-virtual {p0, v0}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V + :try_end_32 + .catch Ljava/lang/reflect/InvocationTargetException; {:try_start_16 .. :try_end_32} :catch_3a + .catch Ljava/lang/Throwable; {:try_start_16 .. :try_end_32} :catch_33 + + goto :goto_76 + + .line 44 + :catch_33 + move-exception p0 + + .line 45 + sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream; + + invoke-virtual {p0, v0}, Ljava/lang/Throwable;->printStackTrace(Ljava/io/PrintStream;)V + + goto :goto_77 + + .line 37 + :catch_3a + move-exception p0 + + .line 38 + invoke-virtual {p0}, Ljava/lang/reflect/InvocationTargetException;->getCause()Ljava/lang/Throwable; + + move-result-object v0 + + instance-of v0, v0, Ljava/lang/InstantiationError; + + if-eqz v0, :cond_71 + + .line 39 + sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream; + + new-instance v1, Ljava/lang/StringBuilder; + + invoke-direct {v1}, Ljava/lang/StringBuilder;-><init>()V + + .line 40 + invoke-virtual {p0}, Ljava/lang/reflect/InvocationTargetException;->getCause()Ljava/lang/Throwable; + + move-result-object v2 + + invoke-virtual {v2}, Ljava/lang/Object;->getClass()Ljava/lang/Class; + + move-result-object v2 + + invoke-virtual {v2}, Ljava/lang/Class;->getName()Ljava/lang/String; + + move-result-object v2 + + invoke-virtual {v1, v2}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; + + const-string v2, ": " + + invoke-virtual {v1, v2}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; + + invoke-virtual {p0}, Ljava/lang/reflect/InvocationTargetException;->getCause()Ljava/lang/Throwable; + + move-result-object p0 + + invoke-virtual {p0}, Ljava/lang/Throwable;->getMessage()Ljava/lang/String; + + move-result-object p0 + + invoke-virtual {v1, p0}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; + + invoke-virtual {v1}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String; + + move-result-object p0 + + .line 39 + invoke-virtual {v0, p0}, Ljava/io/PrintStream;->println(Ljava/lang/String;)V + + goto :goto_76 + + .line 42 + :cond_71 + sget-object v0, Ljava/lang/System;->out:Ljava/io/PrintStream; + + invoke-virtual {p0, v0}, Ljava/lang/reflect/InvocationTargetException;->printStackTrace(Ljava/io/PrintStream;)V + + .line 46 + :goto_76 + nop + + .line 47 + :goto_77 + return-void +.end method diff --git a/test/161-final-abstract-class/src/Main.java 
b/test/161-final-abstract-class/src/Main.java deleted file mode 100644 index 2452490226..0000000000 --- a/test/161-final-abstract-class/src/Main.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright (C) 2017 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.lang.reflect.InvocationTargetException; -import java.lang.reflect.Method; - -public class Main { - public static void main(String[] args) { - try { - // Make sure that the abstract final class is marked as erroneous. - Class.forName("AbstractFinal"); - System.out.println("UNREACHABLE!"); - } catch (VerifyError expected) { - } catch (Throwable t) { - t.printStackTrace(System.out); - } - try { - // Verification of TestClass.test() used to crash when processing - // the final abstract (erroneous) class. - Class<?> tc = Class.forName("TestClass"); - Method test = tc.getDeclaredMethod("test"); - test.invoke(null); - System.out.println("UNREACHABLE!"); - } catch (InvocationTargetException ite) { - if (ite.getCause() instanceof InstantiationError) { - System.out.println( - ite.getCause().getClass().getName() + ": " + ite.getCause().getMessage()); - } else { - ite.printStackTrace(System.out); - } - } catch (Throwable t) { - t.printStackTrace(System.out); - } - } -} diff --git a/test/1929-exception-catch-exception/expected.txt b/test/1929-exception-catch-exception/expected.txt index bc5608ac4e..a82b732eda 100644 --- a/test/1929-exception-catch-exception/expected.txt +++ b/test/1929-exception-catch-exception/expected.txt @@ -1,11 +1,11 @@ Test "art.Test1929$DoThrowClass": Running breakpoint with handler "art.Test1929$DoNothingHandler" -main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: doThrow +main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: doThrow Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929.run() throws java.lang.Exception @ line = 283 + public static void art.Test1929.run() throws java.lang.Exception @ line = 298 Test "art.Test1929$DoThrowClass": Caught error art.Test1929$TestException:"doThrow" with handler "art.Test1929$DoNothingHandler" Test "art.Test1929$DoThrowClass": Finished running with handler "art.Test1929$DoNothingHandler" Test "art.Test1929$DoThrowCatchBaseTestException": Running breakpoint with handler "art.Test1929$DoNothingHandler" @@ -17,71 +17,71 @@ main: public static void art.Test1929.throwCatchBaseTestException() @ line = 140 public static void 
art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 public static void art.Test1929.throwCatchBaseTestException() @ line = 140 public void art.Test1929$DoThrowCatchBaseTestException.run() @ line = 149 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929.run() throws java.lang.Exception @ line = 295 Doing nothing! Caught art.Test1929$TestException: "throwCatchBaseTestException" Test "art.Test1929$DoThrowCatchBaseTestException": No error caught with handler "art.Test1929$DoNothingHandler" Test "art.Test1929$DoThrowCatchBaseTestException": Finished running with handler "art.Test1929$DoNothingHandler" Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Running breakpoint with handler "art.Test1929$DoNothingHandler" -main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice +main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161 - public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 197 - public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 201 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157 + public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 203 + public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 210 + public static void art.Test1929.run() throws java.lang.Exception @ line = 295 Caught art.Test1929$TestException: "throwCatchBaseTestExceptionTwice" Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": No error caught with handler "art.Test1929$DoNothingHandler" Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Finished running with handler "art.Test1929$DoNothingHandler" Test "art.Test1929$DoThrowCatchTestException": Running breakpoint with handler "art.Test1929$DoNothingHandler" -main: public static void art.Test1929.throwCatchTestException() @ line = 207 caught class art.Test1929$TestException: throwCatchTestException +main: public static void art.Test1929.throwCatchTestException() @ line = 216 caught class art.Test1929$TestException: throwCatchTestException Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929.throwCatchTestException() @ line = 207 - public void 
art.Test1929$DoThrowCatchTestException.run() @ line = 216 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929.throwCatchTestException() @ line = 216 + public void art.Test1929$DoThrowCatchTestException.run() @ line = 225 + public static void art.Test1929.run() throws java.lang.Exception @ line = 295 Doing nothing! Caught art.Test1929$TestException: "throwCatchTestException" Test "art.Test1929$DoThrowCatchTestException": No error caught with handler "art.Test1929$DoNothingHandler" Test "art.Test1929$DoThrowCatchTestException": Finished running with handler "art.Test1929$DoNothingHandler" Test "art.Test1929$DoThrowCatchTestExceptionTwice": Running breakpoint with handler "art.Test1929$DoNothingHandler" -main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179 caught class art.Test1929$TestException: throwCatchTestExceptionTwice +main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175 caught class art.Test1929$TestException: throwCatchTestExceptionTwice Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179 - public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 222 - public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 226 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175 + public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 234 + public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 241 + public static void art.Test1929.run() throws java.lang.Exception @ line = 295 Caught art.Test1929$TestException: "throwCatchTestExceptionTwice" Test "art.Test1929$DoThrowCatchTestExceptionTwice": No error caught with handler "art.Test1929$DoNothingHandler" Test "art.Test1929$DoThrowCatchTestExceptionTwice": Finished running with handler "art.Test1929$DoNothingHandler" Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Running breakpoint with handler "art.Test1929$DoNothingHandler" -main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow +main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929.run() throws java.lang.Exception @ line = 283 + public static void art.Test1929.run() throws java.lang.Exception @ line = 298 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": 
Caught error art.Test1929$TestException:"throwCatchTestExceptionNoRethrow" with handler "art.Test1929$DoNothingHandler" Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Finished running with handler "art.Test1929$DoNothingHandler" Test "art.Test1929$DoThrowClass": Running breakpoint with handler "art.Test1929$ThrowCatchBase" -main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: doThrow +main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: doThrow Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929.run() throws java.lang.Exception @ line = 283 + public static void art.Test1929.run() throws java.lang.Exception @ line = 298 Test "art.Test1929$DoThrowClass": Caught error art.Test1929$TestException:"doThrow" with handler "art.Test1929$ThrowCatchBase" Test "art.Test1929$DoThrowClass": Finished running with handler "art.Test1929$ThrowCatchBase" Test "art.Test1929$DoThrowCatchBaseTestException": Running breakpoint with handler "art.Test1929$ThrowCatchBase" @@ -93,73 +93,73 @@ main: public static void art.Test1929.throwCatchBaseTestException() @ line = 140 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 public static void art.Test1929.throwCatchBaseTestException() @ line = 140 public void art.Test1929$DoThrowCatchBaseTestException.run() @ line = 149 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929.run() throws java.lang.Exception @ line = 295 Throwing BaseTestException and catching it! 
Caught art.Test1929$BaseTestException: "ThrowBaseHandler during throw from public static void art.Test1929.throwCatchBaseTestException() @ line = 140" Caught art.Test1929$TestException: "throwCatchBaseTestException" Test "art.Test1929$DoThrowCatchBaseTestException": No error caught with handler "art.Test1929$ThrowCatchBase" Test "art.Test1929$DoThrowCatchBaseTestException": Finished running with handler "art.Test1929$ThrowCatchBase" Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Running breakpoint with handler "art.Test1929$ThrowCatchBase" -main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice +main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161 - public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 197 - public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 201 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157 + public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 203 + public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 210 + public static void art.Test1929.run() throws java.lang.Exception @ line = 295 Caught art.Test1929$TestException: "throwCatchBaseTestExceptionTwice" Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": No error caught with handler "art.Test1929$ThrowCatchBase" Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Finished running with handler "art.Test1929$ThrowCatchBase" Test "art.Test1929$DoThrowCatchTestException": Running breakpoint with handler "art.Test1929$ThrowCatchBase" -main: public static void art.Test1929.throwCatchTestException() @ line = 207 caught class art.Test1929$TestException: throwCatchTestException +main: public static void art.Test1929.throwCatchTestException() @ line = 216 caught class art.Test1929$TestException: throwCatchTestException Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929.throwCatchTestException() @ line = 207 - public void art.Test1929$DoThrowCatchTestException.run() @ line = 216 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929.throwCatchTestException() @ line = 216 + public void art.Test1929$DoThrowCatchTestException.run() @ line = 225 + public static void art.Test1929.run() throws 
java.lang.Exception @ line = 295 Throwing BaseTestException and catching it! -Caught art.Test1929$BaseTestException: "ThrowBaseHandler during throw from public static void art.Test1929.throwCatchTestException() @ line = 207" +Caught art.Test1929$BaseTestException: "ThrowBaseHandler during throw from public static void art.Test1929.throwCatchTestException() @ line = 216" Caught art.Test1929$TestException: "throwCatchTestException" Test "art.Test1929$DoThrowCatchTestException": No error caught with handler "art.Test1929$ThrowCatchBase" Test "art.Test1929$DoThrowCatchTestException": Finished running with handler "art.Test1929$ThrowCatchBase" Test "art.Test1929$DoThrowCatchTestExceptionTwice": Running breakpoint with handler "art.Test1929$ThrowCatchBase" -main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179 caught class art.Test1929$TestException: throwCatchTestExceptionTwice +main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175 caught class art.Test1929$TestException: throwCatchTestExceptionTwice Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179 - public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 222 - public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 226 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175 + public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 234 + public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 241 + public static void art.Test1929.run() throws java.lang.Exception @ line = 295 Caught art.Test1929$TestException: "throwCatchTestExceptionTwice" Test "art.Test1929$DoThrowCatchTestExceptionTwice": No error caught with handler "art.Test1929$ThrowCatchBase" Test "art.Test1929$DoThrowCatchTestExceptionTwice": Finished running with handler "art.Test1929$ThrowCatchBase" Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Running breakpoint with handler "art.Test1929$ThrowCatchBase" -main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow +main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929.run() throws java.lang.Exception @ line = 283 + public static void art.Test1929.run() throws java.lang.Exception @ line = 298 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Caught error 
art.Test1929$TestException:"throwCatchTestExceptionNoRethrow" with handler "art.Test1929$ThrowCatchBase" Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Finished running with handler "art.Test1929$ThrowCatchBase" Test "art.Test1929$DoThrowClass": Running breakpoint with handler "art.Test1929$ThrowBaseTestExceptionHandler" -main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: doThrow +main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: doThrow Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929.run() throws java.lang.Exception @ line = 283 + public static void art.Test1929.run() throws java.lang.Exception @ line = 298 Test "art.Test1929$DoThrowClass": Caught error art.Test1929$TestException:"doThrow" with handler "art.Test1929$ThrowBaseTestExceptionHandler" Test "art.Test1929$DoThrowClass": Finished running with handler "art.Test1929$ThrowBaseTestExceptionHandler" Test "art.Test1929$DoThrowCatchBaseTestException": Running breakpoint with handler "art.Test1929$ThrowBaseTestExceptionHandler" @@ -171,69 +171,69 @@ main: public static void art.Test1929.throwCatchBaseTestException() @ line = 140 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 public static void art.Test1929.throwCatchBaseTestException() @ line = 140 public void art.Test1929$DoThrowCatchBaseTestException.run() @ line = 149 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929.run() throws java.lang.Exception @ line = 295 Throwing BaseTestException! 
Test "art.Test1929$DoThrowCatchBaseTestException": Caught error art.Test1929$BaseTestException:"ThrowBaseHandler during throw from public static void art.Test1929.throwCatchBaseTestException() @ line = 140" with handler "art.Test1929$ThrowBaseTestExceptionHandler" Test "art.Test1929$DoThrowCatchBaseTestException": Finished running with handler "art.Test1929$ThrowBaseTestExceptionHandler" Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Running breakpoint with handler "art.Test1929$ThrowBaseTestExceptionHandler" -main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice +main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161 - public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 197 - public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 201 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157 + public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 203 + public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 210 + public static void art.Test1929.run() throws java.lang.Exception @ line = 295 Caught art.Test1929$TestException: "throwCatchBaseTestExceptionTwice" Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": No error caught with handler "art.Test1929$ThrowBaseTestExceptionHandler" Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Finished running with handler "art.Test1929$ThrowBaseTestExceptionHandler" Test "art.Test1929$DoThrowCatchTestException": Running breakpoint with handler "art.Test1929$ThrowBaseTestExceptionHandler" -main: public static void art.Test1929.throwCatchTestException() @ line = 207 caught class art.Test1929$TestException: throwCatchTestException +main: public static void art.Test1929.throwCatchTestException() @ line = 216 caught class art.Test1929$TestException: throwCatchTestException Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929.throwCatchTestException() @ line = 207 - public void art.Test1929$DoThrowCatchTestException.run() @ line = 216 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929.throwCatchTestException() @ line = 216 + public void art.Test1929$DoThrowCatchTestException.run() @ line = 225 + public static void art.Test1929.run() 
throws java.lang.Exception @ line = 295 Throwing BaseTestException! -Test "art.Test1929$DoThrowCatchTestException": Caught error art.Test1929$BaseTestException:"ThrowBaseHandler during throw from public static void art.Test1929.throwCatchTestException() @ line = 207" with handler "art.Test1929$ThrowBaseTestExceptionHandler" +Test "art.Test1929$DoThrowCatchTestException": Caught error art.Test1929$BaseTestException:"ThrowBaseHandler during throw from public static void art.Test1929.throwCatchTestException() @ line = 216" with handler "art.Test1929$ThrowBaseTestExceptionHandler" Test "art.Test1929$DoThrowCatchTestException": Finished running with handler "art.Test1929$ThrowBaseTestExceptionHandler" Test "art.Test1929$DoThrowCatchTestExceptionTwice": Running breakpoint with handler "art.Test1929$ThrowBaseTestExceptionHandler" -main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179 caught class art.Test1929$TestException: throwCatchTestExceptionTwice +main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175 caught class art.Test1929$TestException: throwCatchTestExceptionTwice Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179 - public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 222 - public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 226 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175 + public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 234 + public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 241 + public static void art.Test1929.run() throws java.lang.Exception @ line = 295 Caught art.Test1929$TestException: "throwCatchTestExceptionTwice" Test "art.Test1929$DoThrowCatchTestExceptionTwice": No error caught with handler "art.Test1929$ThrowBaseTestExceptionHandler" Test "art.Test1929$DoThrowCatchTestExceptionTwice": Finished running with handler "art.Test1929$ThrowBaseTestExceptionHandler" Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Running breakpoint with handler "art.Test1929$ThrowBaseTestExceptionHandler" -main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow +main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929.run() throws java.lang.Exception @ line = 283 + public static void 
art.Test1929.run() throws java.lang.Exception @ line = 298 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Caught error art.Test1929$TestException:"throwCatchTestExceptionNoRethrow" with handler "art.Test1929$ThrowBaseTestExceptionHandler" Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Finished running with handler "art.Test1929$ThrowBaseTestExceptionHandler" Test "art.Test1929$DoThrowClass": Running breakpoint with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" -main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: doThrow +main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: doThrow Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929.run() throws java.lang.Exception @ line = 283 + public static void art.Test1929.run() throws java.lang.Exception @ line = 298 Test "art.Test1929$DoThrowClass": Caught error art.Test1929$TestException:"doThrow" with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" Test "art.Test1929$DoThrowClass": Finished running with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" Test "art.Test1929$DoThrowCatchBaseTestException": Running breakpoint with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" @@ -245,58 +245,58 @@ main: public static void art.Test1929.throwCatchBaseTestException() @ line = 140 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 public static void art.Test1929.throwCatchBaseTestException() @ line = 140 public void art.Test1929$DoThrowCatchBaseTestException.run() @ line = 149 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929.run() throws java.lang.Exception @ line = 295 Throwing TestExceptionNoRethrow! 
Test "art.Test1929$DoThrowCatchBaseTestException": Caught error art.Test1929$TestExceptionNoRethrow:"ThrowTestExceptionNoRethrowHandler during throw from public static void art.Test1929.throwCatchBaseTestException() @ line = 140" with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" Test "art.Test1929$DoThrowCatchBaseTestException": Finished running with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Running breakpoint with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" -main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice +main: public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157 caught class art.Test1929$TestException: throwCatchBaseTestExceptionTwice Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 161 - public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 197 - public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 201 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929$Impl.throwCatchBaseTestExceptionTwiceImpl() @ line = 157 + public static void art.Test1929.throwCatchBaseTestExceptionTwice() @ line = 203 + public void art.Test1929$DoThrowCatchBaseTestExceptionTwice.run() @ line = 210 + public static void art.Test1929.run() throws java.lang.Exception @ line = 295 Caught art.Test1929$TestException: "throwCatchBaseTestExceptionTwice" Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": No error caught with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" Test "art.Test1929$DoThrowCatchBaseTestExceptionTwice": Finished running with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" Test "art.Test1929$DoThrowCatchTestException": Running breakpoint with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" -main: public static void art.Test1929.throwCatchTestException() @ line = 207 caught class art.Test1929$TestException: throwCatchTestException +main: public static void art.Test1929.throwCatchTestException() @ line = 216 caught class art.Test1929$TestException: throwCatchTestException Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929.throwCatchTestException() @ line = 207 - public void art.Test1929$DoThrowCatchTestException.run() @ line = 216 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929.throwCatchTestException() @ line = 216 + public void art.Test1929$DoThrowCatchTestException.run() @ 
line = 225 + public static void art.Test1929.run() throws java.lang.Exception @ line = 295 Throwing TestExceptionNoRethrow! -Test "art.Test1929$DoThrowCatchTestException": Caught error art.Test1929$TestExceptionNoRethrow:"ThrowTestExceptionNoRethrowHandler during throw from public static void art.Test1929.throwCatchTestException() @ line = 207" with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" +Test "art.Test1929$DoThrowCatchTestException": Caught error art.Test1929$TestExceptionNoRethrow:"ThrowTestExceptionNoRethrowHandler during throw from public static void art.Test1929.throwCatchTestException() @ line = 216" with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" Test "art.Test1929$DoThrowCatchTestException": Finished running with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" Test "art.Test1929$DoThrowCatchTestExceptionTwice": Running breakpoint with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" -main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179 caught class art.Test1929$TestException: throwCatchTestExceptionTwice +main: public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175 caught class art.Test1929$TestException: throwCatchTestExceptionTwice Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 179 - public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 222 - public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 226 - public static void art.Test1929.run() throws java.lang.Exception @ line = 280 + public static void art.Test1929$Impl.throwCatchTestExceptionTwiceImpl() @ line = 175 + public static void art.Test1929.throwCatchTestExceptionTwice() @ line = 234 + public void art.Test1929$DoThrowCatchTestExceptionTwice.run() @ line = 241 + public static void art.Test1929.run() throws java.lang.Exception @ line = 295 Caught art.Test1929$TestException: "throwCatchTestExceptionTwice" Test "art.Test1929$DoThrowCatchTestExceptionTwice": No error caught with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" Test "art.Test1929$DoThrowCatchTestExceptionTwice": Finished running with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Running breakpoint with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" -main: public static void art.Test1929.run() throws java.lang.Exception @ line = 283 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow +main: public static void art.Test1929.run() throws java.lang.Exception @ line = 298 caught class art.Test1929$TestException: throwCatchTestExceptionNoRethrow Current Stack: private static native art.StackTrace$StackFrameData[] art.StackTrace.nativeGetStackTrace(java.lang.Thread) @ line = -1 public static art.StackTrace$StackFrameData[] art.StackTrace.GetStackTrace(java.lang.Thread) @ line = 61 private static void art.Test1929.PrintStack() @ line = 52 public static void 
art.Test1929.ExceptionCatchEvent(java.lang.Thread,java.lang.reflect.Executable,long,java.lang.Throwable) @ line = 65 - public static void art.Test1929.run() throws java.lang.Exception @ line = 283 + public static void art.Test1929.run() throws java.lang.Exception @ line = 298 Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Caught error art.Test1929$TestException:"throwCatchTestExceptionNoRethrow" with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" Test "art.Test1929$DoThrowCatchTestExceptionNoRethrow": Finished running with handler "art.Test1929$ThrowTestExceptionNoRethrowHandler" diff --git a/test/1929-exception-catch-exception/src/art/Test1929.java b/test/1929-exception-catch-exception/src/art/Test1929.java index 07d2087a0f..e2deb3f85f 100644 --- a/test/1929-exception-catch-exception/src/art/Test1929.java +++ b/test/1929-exception-catch-exception/src/art/Test1929.java @@ -152,49 +152,58 @@ public class Test1929 { // dx/d8/jack all do an optimization around catch blocks that (while legal) breaks assumptions // this test relies on so we have the actual implementation be corrected smali. This does work // for RI however. - public static final class Impl { - private Impl() {} - public static void throwCatchBaseTestExceptionTwiceImpl() { - try { - try { - throw new TestException("throwCatchBaseTestExceptionTwice"); - } catch (BaseTestException t) { - System.out.println("Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\""); - if (PRINT_FULL_EXCEPTION) { - t.printStackTrace(System.out); - } - } - } catch (BaseTestException t) { - System.out.println("2nd Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\""); - if (PRINT_FULL_EXCEPTION) { - t.printStackTrace(System.out); - } - } - } - public static void throwCatchTestExceptionTwiceImpl() { - try { - try { - throw new TestException("throwCatchTestExceptionTwice"); - } catch (TestException t) { - System.out.println("Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\""); - if (PRINT_FULL_EXCEPTION) { - t.printStackTrace(System.out); - } - } - } catch (TestException t) { - System.out.println("2nd Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\""); - if (PRINT_FULL_EXCEPTION) { - t.printStackTrace(System.out); - } - } - } - } + // For reference: + + // public static final class Impl { + // private Impl() {} + // public static void throwCatchBaseTestExceptionTwiceImpl() { + // try { + // try { + // throw new TestException("throwCatchBaseTestExceptionTwice"); + // } catch (BaseTestException t) { + // System.out.println("Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\""); + // if (PRINT_FULL_EXCEPTION) { + // t.printStackTrace(System.out); + // } + // } + // } catch (BaseTestException t) { + // System.out.println("2nd Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\""); + // if (PRINT_FULL_EXCEPTION) { + // t.printStackTrace(System.out); + // } + // } + // } + + // public static void throwCatchTestExceptionTwiceImpl() { + // try { + // try { + // throw new TestException("throwCatchTestExceptionTwice"); + // } catch (TestException t) { + // System.out.println("Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\""); + // if (PRINT_FULL_EXCEPTION) { + // t.printStackTrace(System.out); + // } + // } + // } catch (TestException t) { + // System.out.println("2nd Caught " + t.getClass().getName() + ": \"" + t.getMessage() + "\""); + // if (PRINT_FULL_EXCEPTION) { + // t.printStackTrace(System.out); + // } + // } + // } + // } public 
static void throwCatchBaseTestExceptionTwice() { // The implementation of this has to change depending upon the runtime slightly due to compiler // optimizations present in DX/D8/Jack. - Impl.throwCatchBaseTestExceptionTwiceImpl(); + try { + Class<?> Impl = Class.forName("art.Test1929$Impl"); + Method m = Impl.getMethod("throwCatchBaseTestExceptionTwiceImpl"); + m.invoke(null); + } catch (Exception e) { + e.printStackTrace(System.out); + } } public static class DoThrowCatchBaseTestExceptionTwice implements Runnable { @@ -219,7 +228,13 @@ public class Test1929 { public static void throwCatchTestExceptionTwice() { // The implementation of this has to change depending upon the runtime slightly due to compiler // optimizations present in DX/D8/Jack. - Impl.throwCatchTestExceptionTwiceImpl(); + try { + Class<?> Impl = Class.forName("art.Test1929$Impl"); + Method m = Impl.getMethod("throwCatchTestExceptionTwiceImpl"); + m.invoke(null); + } catch (Exception e) { + e.printStackTrace(System.out); + } } public static class DoThrowCatchTestExceptionTwice implements Runnable { diff --git a/test/1935-get-set-current-frame-jit/expected.txt b/test/1935-get-set-current-frame-jit/expected.txt index fed993cc1a..cdb8f6a825 100644 --- a/test/1935-get-set-current-frame-jit/expected.txt +++ b/test/1935-get-set-current-frame-jit/expected.txt @@ -1,7 +1,7 @@ JNI_OnLoad called From GetLocalInt(), value is 42 -isInterpreted? true +isInOsrCode? false Value is '42' Setting TARGET to 1337 -isInterpreted? true +isInOsrCode? false Value is '1337' diff --git a/test/1935-get-set-current-frame-jit/src/Main.java b/test/1935-get-set-current-frame-jit/src/Main.java index eb0a6374d2..714a98aaf3 100644 --- a/test/1935-get-set-current-frame-jit/src/Main.java +++ b/test/1935-get-set-current-frame-jit/src/Main.java @@ -64,9 +64,9 @@ public class Main { Main.ensureJitCompiled(IntRunner.class, "run"); i++; } - // We shouldn't be doing OSR since we are using JVMTI and the get/set local will push us to - // interpreter. - System.out.println("isInterpreted? " + Main.isInterpreted()); + // We shouldn't be doing OSR since we are using JVMTI and the get/set prevents OSR. + // Set local will also push us to interpreter but the get local may remain in compiled code. + System.out.println("isInOsrCode? 
" + (hasJit() && Main.isInOsrCode("run"))); reportValue(TARGET); } public void waitForBusyLoopStart() { while (!inBusyLoop) {} } @@ -159,4 +159,6 @@ public class Main { public static native void ensureJitCompiled(Class k, String f); public static native boolean isInterpreted(); + public static native boolean isInOsrCode(String methodName); + public static native boolean hasJit(); } diff --git a/test/674-hiddenapi/hiddenapi.cc b/test/674-hiddenapi/hiddenapi.cc index effa37ade4..04c3fbf03a 100644 --- a/test/674-hiddenapi/hiddenapi.cc +++ b/test/674-hiddenapi/hiddenapi.cc @@ -16,6 +16,7 @@ #include "class_linker.h" #include "dex/art_dex_file_loader.h" +#include "hidden_api.h" #include "jni.h" #include "runtime.h" #include "scoped_thread_state_change-inl.h" @@ -27,7 +28,7 @@ namespace Test674HiddenApi { extern "C" JNIEXPORT void JNICALL Java_Main_init(JNIEnv*, jclass) { Runtime* runtime = Runtime::Current(); - runtime->SetHiddenApiChecksEnabled(true); + runtime->SetHiddenApiEnforcementPolicy(hiddenapi::EnforcementPolicy::kBlacklistOnly); runtime->SetDedupeHiddenApiWarnings(false); runtime->AlwaysSetHiddenApiWarningFlag(); } diff --git a/test/674-hiddenapi/src-ex/ChildClass.java b/test/674-hiddenapi/src-ex/ChildClass.java index 8cd237ab6f..582e907ca3 100644 --- a/test/674-hiddenapi/src-ex/ChildClass.java +++ b/test/674-hiddenapi/src-ex/ChildClass.java @@ -123,9 +123,6 @@ public class ChildClass { // Check whether one can use an interface default method. String name = "method" + visibility.name() + "Default" + hiddenness.name(); checkMethod(ParentInterface.class, name, /*isStatic*/ false, visibility, expected); - - // Check whether one can override this method. - checkOverriding(suffix, isStatic, visibility, expected); } // Test whether static linking succeeds. @@ -406,37 +403,6 @@ public class ChildClass { } } - private static void checkOverriding(String suffix, - boolean isStatic, - Visibility visibility, - Behaviour behaviour) throws Exception { - if (isStatic || visibility == Visibility.Private) { - // Does not make sense to override a static or private method. - return; - } - - // The classes are in the same package, but will be able to access each - // other only if loaded with the same class loader, here the boot class loader. - boolean canAccess = (visibility != Visibility.Package) || (isParentInBoot && isChildInBoot); - boolean setsWarning = false; // warnings may be set during vtable linking - - String methodName = "callMethod" + visibility.name() + suffix; - - // Force the test class to link its vtable, which may cause warnings, before - // the actual test. - new OverrideClass().methodPublicWhitelist(); - - clearWarning(); - if (Linking.canOverride(methodName) != canAccess) { - throw new RuntimeException("Expected to " + (canAccess ? "" : "not ") + - "be able to override " + methodName + "." + - "isParentInBoot = " + isParentInBoot + ", " + "isChildInBoot = " + isChildInBoot); - } - if (canAccess && hasPendingWarning() != setsWarning) { - throwWarningException(ParentClass.class, methodName, false, "static linking", setsWarning); - } - } - private static void throwDiscoveryException(Class<?> klass, String name, boolean isField, String fn, boolean canAccess) { throw new RuntimeException("Expected " + (isField ? 
"field " : "method ") + klass.getName() + diff --git a/test/674-hiddenapi/src-ex/Linking.java b/test/674-hiddenapi/src-ex/Linking.java index b416250953..a89b92b2b9 100644 --- a/test/674-hiddenapi/src-ex/Linking.java +++ b/test/674-hiddenapi/src-ex/Linking.java @@ -14,7 +14,6 @@ * limitations under the License. */ -import java.lang.reflect.Method; import java.lang.reflect.InvocationTargetException; public class Linking { @@ -35,16 +34,6 @@ public class Linking { } } } - - public static boolean canOverride(String methodName) throws Exception { - // ParentClass returns only positive numbers, OverrideClass only negative. - // This way we can tell if OverrideClass managed to override the original - // method or not. - Method method = ParentClass.class.getDeclaredMethod(methodName); - int result1 = (int) method.invoke(new ParentClass()); - int result2 = (int) method.invoke(new OverrideClass()); - return (result1 > 0) && (result2 < 0); - } } // INSTANCE FIELD GET diff --git a/test/674-hiddenapi/src-ex/OverrideClass.java b/test/674-hiddenapi/src-ex/OverrideClass.java deleted file mode 100644 index 1f1f4d6aac..0000000000 --- a/test/674-hiddenapi/src-ex/OverrideClass.java +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (C) 2018 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -public class OverrideClass extends ParentClass { - - @Override public int methodPublicWhitelist() { return -411; } - @Override int methodPackageWhitelist() { return -412; } - @Override protected int methodProtectedWhitelist() { return -413; } - - @Override public int methodPublicLightGreylist() { return -421; } - @Override int methodPackageLightGreylist() { return -422; } - @Override protected int methodProtectedLightGreylist() { return -423; } - - @Override public int methodPublicDarkGreylist() { return -431; } - @Override int methodPackageDarkGreylist() { return -432; } - @Override protected int methodProtectedDarkGreylist() { return -433; } - - @Override public int methodPublicBlacklist() { return -441; } - @Override int methodPackageBlacklist() { return -442; } - @Override protected int methodProtectedBlacklist() { return -443; } - -} diff --git a/test/679-checker-minmax/src/Main.java b/test/679-checker-minmax/src/Main.java index d016de6686..38085bbd7b 100644 --- a/test/679-checker-minmax/src/Main.java +++ b/test/679-checker-minmax/src/Main.java @@ -79,6 +79,51 @@ public class Main { return a >= b ? 
b : a; } + /// CHECK-START: int Main.min5(short, short) instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Op1:s\d+>>,<<Op2:s\d+>>] + /// CHECK-DAG: <<Sel:i\d+>> Select [<<Op2>>,<<Op1>>,<<Cnd>>] + /// CHECK-DAG: Return [<<Sel>>] + // + /// CHECK-START: int Main.min5(short, short) instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Min:i\d+>> Min + /// CHECK-DAG: Return [<<Min>>] + // + /// CHECK-START: int Main.min5(short, short) instruction_simplifier$after_inlining (after) + /// CHECK-NOT: Select + public static int min5(short a, short b) { + return a >= b ? b : a; + } + + /// CHECK-START: int Main.min6(byte, byte) instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Op1:b\d+>>,<<Op2:b\d+>>] + /// CHECK-DAG: <<Sel:i\d+>> Select [<<Op2>>,<<Op1>>,<<Cnd>>] + /// CHECK-DAG: Return [<<Sel>>] + // + /// CHECK-START: int Main.min6(byte, byte) instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Min:i\d+>> Min + /// CHECK-DAG: Return [<<Min>>] + // + /// CHECK-START: int Main.min6(byte, byte) instruction_simplifier$after_inlining (after) + /// CHECK-NOT: Select + public static int min6(byte a, byte b) { + return a >= b ? b : a; + } + + /// CHECK-START: long Main.min7(long, long) instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Op1:j\d+>>,<<Op2:j\d+>>] + /// CHECK-DAG: <<Sel:j\d+>> Select [<<Op2>>,<<Op1>>,<<Cnd>>] + /// CHECK-DAG: Return [<<Sel>>] + // + /// CHECK-START: long Main.min7(long, long) instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Min:j\d+>> Min + /// CHECK-DAG: Return [<<Min>>] + // + /// CHECK-START: long Main.min7(long, long) instruction_simplifier$after_inlining (after) + /// CHECK-NOT: Select + public static long min7(long a, long b) { + return a >= b ? b : a; + } + /// CHECK-START: int Main.max1(int, int) instruction_simplifier$after_inlining (before) /// CHECK-DAG: <<Cnd:z\d+>> GreaterThanOrEqual [<<Op1:i\d+>>,<<Op2:i\d+>>] /// CHECK-DAG: <<Sel:i\d+>> Select [<<Op2>>,<<Op1>>,<<Cnd>>] @@ -139,15 +184,66 @@ public class Main { return a >= b ? a : b; } + /// CHECK-START: int Main.max5(short, short) instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Op1:s\d+>>,<<Op2:s\d+>>] + /// CHECK-DAG: <<Sel:i\d+>> Select [<<Op1>>,<<Op2>>,<<Cnd>>] + /// CHECK-DAG: Return [<<Sel>>] + // + /// CHECK-START: int Main.max5(short, short) instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Max:i\d+>> Max + /// CHECK-DAG: Return [<<Max>>] + // + /// CHECK-START: int Main.max5(short, short) instruction_simplifier$after_inlining (after) + /// CHECK-NOT: Select + public static int max5(short a, short b) { + return a >= b ? a : b; + } + + /// CHECK-START: int Main.max6(byte, byte) instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Op1:b\d+>>,<<Op2:b\d+>>] + /// CHECK-DAG: <<Sel:i\d+>> Select [<<Op1>>,<<Op2>>,<<Cnd>>] + /// CHECK-DAG: Return [<<Sel>>] + // + /// CHECK-START: int Main.max6(byte, byte) instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Max:i\d+>> Max + /// CHECK-DAG: Return [<<Max>>] + // + /// CHECK-START: int Main.max6(byte, byte) instruction_simplifier$after_inlining (after) + /// CHECK-NOT: Select + public static int max6(byte a, byte b) { + return a >= b ? 
a : b; + } + + /// CHECK-START: long Main.max7(long, long) instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Op1:j\d+>>,<<Op2:j\d+>>] + /// CHECK-DAG: <<Sel:j\d+>> Select [<<Op1>>,<<Op2>>,<<Cnd>>] + /// CHECK-DAG: Return [<<Sel>>] + // + /// CHECK-START: long Main.max7(long, long) instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Max:j\d+>> Max + /// CHECK-DAG: Return [<<Max>>] + // + /// CHECK-START: long Main.max7(long, long) instruction_simplifier$after_inlining (after) + /// CHECK-NOT: Select + public static long max7(long a, long b) { + return a >= b ? a : b; + } + + public static void main(String[] args) { expectEquals(10, min1(10, 20)); expectEquals(10, min2(10, 20)); expectEquals(10, min3(10, 20)); expectEquals(10, min4(10, 20)); + expectEquals(10, min5((short) 10, (short) 20)); + expectEquals(10, min6((byte) 10, (byte) 20)); + expectEquals(10L, min7(10L, 20L)); expectEquals(20, max1(10, 20)); expectEquals(20, max2(10, 20)); expectEquals(20, max3(10, 20)); expectEquals(20, max4(10, 20)); + expectEquals(20, max5((short) 10, (short) 20)); + expectEquals(20, max6((byte) 10, (byte) 20)); + expectEquals(20L, max7(10L, 20L)); System.out.println("passed"); } @@ -156,4 +252,10 @@ throw new Error("Expected: " + expected + ", found: " + result); } } + + private static void expectEquals(long expected, long result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } } diff --git a/test/679-locks/expected.txt b/test/679-locks/expected.txt new file mode 100644 index 0000000000..85a20bea2f --- /dev/null +++ b/test/679-locks/expected.txt @@ -0,0 +1,2 @@ +JNI_OnLoad called +MyString diff --git a/test/679-locks/info.txt b/test/679-locks/info.txt new file mode 100644 index 0000000000..7ada4900c6 --- /dev/null +++ b/test/679-locks/info.txt @@ -0,0 +1,2 @@ +Ensure FindLocksAtDexPc is able to pass through quickened instructions related +to unresolved classes. diff --git a/test/679-locks/run b/test/679-locks/run new file mode 100644 index 0000000000..0cc87f3168 --- /dev/null +++ b/test/679-locks/run @@ -0,0 +1,18 @@ +#!/bin/bash +# +# Copyright (C) 2018 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Run without an app image to prevent the class NotLoaded from being loaded at startup. +exec ${RUN} "${@}" --no-app-image diff --git a/test/679-locks/src/Main.java b/test/679-locks/src/Main.java new file mode 100644 index 0000000000..fbc8c53833 --- /dev/null +++ b/test/679-locks/src/Main.java @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +class NotLoaded { + public void foo() {} +} + +public class Main { + public static void main(String[] args) throws Exception { + System.loadLibrary(args[0]); + TestSync.run(); + } + + public static void run() { + testVisitLocks(); + } + + static Object myStatic; + + // Note: declared in 167-visit-locks. + public static native void testVisitLocks(); +} + +// 167-visit-locks/visit-locks.cc looks at the locks held in TestSync.run(). +class TestSync { + public static void run() { + Object o = Main.myStatic; + if (o != null) { + if (o instanceof NotLoaded) { + ((NotLoaded)o).foo(); + } + } + synchronized ("MyString") { + Main.testVisitLocks(); + } + } +} diff --git a/test/680-sink-regression/expected.txt b/test/680-sink-regression/expected.txt new file mode 100644 index 0000000000..b0aad4deb5 --- /dev/null +++ b/test/680-sink-regression/expected.txt @@ -0,0 +1 @@ +passed diff --git a/test/680-sink-regression/info.txt b/test/680-sink-regression/info.txt new file mode 100644 index 0000000000..547e3b801a --- /dev/null +++ b/test/680-sink-regression/info.txt @@ -0,0 +1 @@ +Regression test for code sinking with exceptions (b/75971227). diff --git a/test/680-sink-regression/src/Main.java b/test/680-sink-regression/src/Main.java new file mode 100644 index 0000000000..642c3abdf4 --- /dev/null +++ b/test/680-sink-regression/src/Main.java @@ -0,0 +1,87 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.*; + +/** + * Regression test for b/75971227 (code sinking with exceptions). + */ +public class Main { + + public static class N { + int x; + } + + private int f; + + public int doit(N n1) throws FileNotFoundException { + int x = 1; + N n3 = new N(); + try { + if (n1.x == 0) { + f = 11; + x = 3; + } else { + f = x; + } + throw new FileNotFoundException("n3" + n3.x); + } catch (NullPointerException e) { + } + return x; + } + + + public static void main(String[] args) { + N n = new N(); + Main t = new Main(); + int x = 0; + + // Main 1, null pointer argument. + t.f = 0; + try { + x = t.doit(null); + } catch (FileNotFoundException e) { + x = -1; + } + if (x != 1 || t.f != 0) { + throw new Error("Main 1: x=" + x + " f=" + t.f); + } + + // Main 2, n.x is 0. + n.x = 0; + try { + x = t.doit(n); + } catch (FileNotFoundException e) { + x = -1; + } + if (x != -1 || t.f != 11) { + throw new Error("Main 2: x=" + x + " f=" + t.f); + } + + // Main 3, n.x is not 0. 
+ n.x = 1; + try { + x = t.doit(n); + } catch (FileNotFoundException e) { + x = -1; + } + if (x != -1 || t.f != 1) { + throw new Error("Main 3: x=" + x + " f=" + t.f); + } + + System.out.println("passed"); + } +} diff --git a/test/681-checker-abs/expected.txt b/test/681-checker-abs/expected.txt new file mode 100644 index 0000000000..b0aad4deb5 --- /dev/null +++ b/test/681-checker-abs/expected.txt @@ -0,0 +1 @@ +passed diff --git a/test/681-checker-abs/info.txt b/test/681-checker-abs/info.txt new file mode 100644 index 0000000000..d36e76e504 --- /dev/null +++ b/test/681-checker-abs/info.txt @@ -0,0 +1 @@ +Functional tests on detecting abs. diff --git a/test/681-checker-abs/src/Main.java b/test/681-checker-abs/src/Main.java new file mode 100644 index 0000000000..8064b1dac1 --- /dev/null +++ b/test/681-checker-abs/src/Main.java @@ -0,0 +1,184 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Functional tests for detecting abs. + */ +public class Main { + + /// CHECK-START: int Main.abs1(int) instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Par:i\d+>> ParameterValue + /// CHECK-DAG: <<Zer:i\d+>> IntConstant 0 + /// CHECK-DAG: <<Cnd:z\d+>> GreaterThanOrEqual [<<Par>>,<<Zer>>] + /// CHECK-DAG: <<Neg:i\d+>> Neg [<<Par>>] + /// CHECK-DAG: <<Sel:i\d+>> Select [<<Neg>>,<<Par>>,<<Cnd>>] + /// CHECK-DAG: Return [<<Sel>>] + // + /// CHECK-START: int Main.abs1(int) instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Par:i\d+>> ParameterValue + /// CHECK-DAG: <<Abs:i\d+>> Abs [<<Par>>] + /// CHECK-DAG: Return [<<Abs>>] + // + /// CHECK-START: int Main.abs1(int) instruction_simplifier$after_inlining (after) + /// CHECK-NOT: Select + public static int abs1(int a) { + return a < 0 ? -a : a; + } + + /// CHECK-START: int Main.abs2(int) instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Par:i\d+>> ParameterValue + /// CHECK-DAG: <<Zer:i\d+>> IntConstant 0 + /// CHECK-DAG: <<Cnd:z\d+>> GreaterThan [<<Par>>,<<Zer>>] + /// CHECK-DAG: <<Neg:i\d+>> Neg [<<Par>>] + /// CHECK-DAG: <<Sel:i\d+>> Select [<<Neg>>,<<Par>>,<<Cnd>>] + /// CHECK-DAG: Return [<<Sel>>] + // + /// CHECK-START: int Main.abs2(int) instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Par:i\d+>> ParameterValue + /// CHECK-DAG: <<Abs:i\d+>> Abs [<<Par>>] + /// CHECK-DAG: Return [<<Abs>>] + // + /// CHECK-START: int Main.abs2(int) instruction_simplifier$after_inlining (after) + /// CHECK-NOT: Select + public static int abs2(int a) { + return a <= 0 ?
-a : a; + } + + /// CHECK-START: int Main.abs3(int) instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Par:i\d+>> ParameterValue + /// CHECK-DAG: <<Zer:i\d+>> IntConstant 0 + /// CHECK-DAG: <<Cnd:z\d+>> LessThanOrEqual [<<Par>>,<<Zer>>] + /// CHECK-DAG: <<Neg:i\d+>> Neg [<<Par>>] + /// CHECK-DAG: <<Sel:i\d+>> Select [<<Par>>,<<Neg>>,<<Cnd>>] + /// CHECK-DAG: Return [<<Sel>>] + // + /// CHECK-START: int Main.abs3(int) instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Par:i\d+>> ParameterValue + /// CHECK-DAG: <<Abs:i\d+>> Abs [<<Par>>] + /// CHECK-DAG: Return [<<Abs>>] + // + /// CHECK-START: int Main.abs3(int) instruction_simplifier$after_inlining (after) + /// CHECK-NOT: Select + public static int abs3(int a) { + return a > 0 ? a : -a; + } + + /// CHECK-START: int Main.abs4(int) instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Par:i\d+>> ParameterValue + /// CHECK-DAG: <<Zer:i\d+>> IntConstant 0 + /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Par>>,<<Zer>>] + /// CHECK-DAG: <<Neg:i\d+>> Neg [<<Par>>] + /// CHECK-DAG: <<Sel:i\d+>> Select [<<Par>>,<<Neg>>,<<Cnd>>] + /// CHECK-DAG: Return [<<Sel>>] + // + /// CHECK-START: int Main.abs4(int) instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Par:i\d+>> ParameterValue + /// CHECK-DAG: <<Abs:i\d+>> Abs [<<Par>>] + /// CHECK-DAG: Return [<<Abs>>] + // + /// CHECK-START: int Main.abs4(int) instruction_simplifier$after_inlining (after) + /// CHECK-NOT: Select + public static int abs4(int a) { + return a >= 0 ? a : -a; + } + + /// CHECK-START: int Main.abs5(short) instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Par:s\d+>> ParameterValue + /// CHECK-DAG: <<Zer:i\d+>> IntConstant 0 + /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Par>>,<<Zer>>] + /// CHECK-DAG: <<Neg:i\d+>> Neg [<<Par>>] + /// CHECK-DAG: <<Sel:i\d+>> Select [<<Par>>,<<Neg>>,<<Cnd>>] + /// CHECK-DAG: Return [<<Sel>>] + // + /// CHECK-START: int Main.abs5(short) instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Par:s\d+>> ParameterValue + /// CHECK-DAG: <<Abs:i\d+>> Abs [<<Par>>] + /// CHECK-DAG: Return [<<Abs>>] + // + /// CHECK-START: int Main.abs5(short) instruction_simplifier$after_inlining (after) + /// CHECK-NOT: Select + public static int abs5(short a) { + return a >= 0 ? a : -a; + } + + /// CHECK-START: int Main.abs6(byte) instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Par:b\d+>> ParameterValue + /// CHECK-DAG: <<Zer:i\d+>> IntConstant 0 + /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Par>>,<<Zer>>] + /// CHECK-DAG: <<Neg:i\d+>> Neg [<<Par>>] + /// CHECK-DAG: <<Sel:i\d+>> Select [<<Par>>,<<Neg>>,<<Cnd>>] + /// CHECK-DAG: Return [<<Sel>>] + // + /// CHECK-START: int Main.abs6(byte) instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Par:b\d+>> ParameterValue + /// CHECK-DAG: <<Abs:i\d+>> Abs [<<Par>>] + /// CHECK-DAG: Return [<<Abs>>] + // + /// CHECK-START: int Main.abs6(byte) instruction_simplifier$after_inlining (after) + /// CHECK-NOT: Select + public static int abs6(byte a) { + return a >= 0 ?
a : -a; + } + + /// CHECK-START: long Main.abs7(long) instruction_simplifier$after_inlining (before) + /// CHECK-DAG: <<Par:j\d+>> ParameterValue + /// CHECK-DAG: <<Zer:j\d+>> LongConstant 0 + /// CHECK-DAG: <<Cnd:z\d+>> LessThan [<<Par>>,<<Zer>>] + /// CHECK-DAG: <<Neg:j\d+>> Neg [<<Par>>] + /// CHECK-DAG: <<Sel:j\d+>> Select [<<Par>>,<<Neg>>,<<Cnd>>] + /// CHECK-DAG: Return [<<Sel>>] + // + /// CHECK-START: long Main.abs7(long) instruction_simplifier$after_inlining (after) + /// CHECK-DAG: <<Par:j\d+>> ParameterValue + /// CHECK-DAG: <<Abs:j\d+>> Abs [<<Par>>] + /// CHECK-DAG: Return [<<Abs>>] + // + /// CHECK-START: long Main.abs7(long) instruction_simplifier$after_inlining (after) + /// CHECK-NOT: Select + public static long abs7(long a) { + return a >= 0 ? a : -a; + } + + public static void main(String[] args) { + expectEquals(10, abs1(-10)); + expectEquals(20, abs1(20)); + expectEquals(10, abs2(-10)); + expectEquals(20, abs2(20)); + expectEquals(10, abs3(-10)); + expectEquals(20, abs3(20)); + expectEquals(10, abs4(-10)); + expectEquals(20, abs4(20)); + expectEquals(10, abs5((short) -10)); + expectEquals(20, abs5((short) 20)); + expectEquals(10, abs6((byte) -10)); + expectEquals(20, abs6((byte) 20)); + expectEquals(10L, abs7(-10L)); + expectEquals(20L, abs7(20L)); + System.out.println("passed"); + } + + private static void expectEquals(int expected, int result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } + + private static void expectEquals(long expected, long result) { + if (expected != result) { + throw new Error("Expected: " + expected + ", found: " + result); + } + } +} diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk index cf781d7f2b..6633958140 100644 --- a/test/Android.run-test.mk +++ b/test/Android.run-test.mk @@ -24,7 +24,6 @@ TEST_ART_RUN_TEST_DEPENDENCIES := \ $(HOST_OUT_EXECUTABLES)/hiddenapi \ $(HOST_OUT_EXECUTABLES)/jasmin \ $(HOST_OUT_EXECUTABLES)/smali \ - $(HOST_OUT_EXECUTABLES)/dexmerger \ $(HOST_OUT_JAVA_LIBRARIES)/desugar.jar # Add d8 dependency, if enabled. @@ -103,7 +102,7 @@ endif # Host executables. host_prereq_rules := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES) -# Required for dx, jasmin, smali, dexmerger. +# Required for dx, jasmin, smali. host_prereq_rules += $(TEST_ART_RUN_TEST_DEPENDENCIES) # Sync test files to the target, depends upon all things that must be pushed diff --git a/test/etc/default-build b/test/etc/default-build index 3e6577cfda..9de7294a59 100755 --- a/test/etc/default-build +++ b/test/etc/default-build @@ -341,8 +341,26 @@ function make_dexmerge() { shift done - # Should have at least 1 dex_files_to_merge here, otherwise dxmerger will print the help. - ${DXMERGER} "$dst_file" "${dex_files_to_merge[@]}" + # Skip merge if we are not merging anything, i.e. input == output. + if [[ "${#dex_files_to_merge[@]}" -eq "1" ]]; then + local single_input=${dex_files_to_merge[0]} + if [[ "$dst_file" != "$single_input" ]]; then + mv "$single_input" "$dst_file"; + return + fi + fi + + # We assume the dexer did all the API level checks and just merge away.
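+ # (d8 emits classes.dex, classes2.dex, ... into the output directory, so the + # classes2.dex probe below detects when the merge spills into a second dex + # file; the artificially high --min-api reflects the assumption above that + # the dexer already did all API level checks.)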
+ mkdir d8_merge_out + ${DXMERGER} --min-api 1000 --output ./d8_merge_out "${dex_files_to_merge[@]}" + + if [[ -e "./d8_merge_out/classes2.dex" ]]; then + echo "Cannot merge all dex files into a single dex" + exit 1 + fi + + mv ./d8_merge_out/classes.dex "$dst_file"; + rmdir d8_merge_out } function make_hiddenapi() { diff --git a/test/knownfailures.json b/test/knownfailures.json index a7e76d131e..d390c4c049 100644 --- a/test/knownfailures.json +++ b/test/knownfailures.json @@ -276,7 +276,8 @@ }, { "tests": "596-app-images", - "variant": "npictest" + "description": "Code being tested has been disabled", + "bug": "b/70734839" }, { "tests": "055-enum-performance", @@ -954,11 +955,19 @@ }, { "tests": ["616-cha-unloading", - "678-quickening"], + "678-quickening", + "679-locks"], "variant": "jvm", "description": ["Doesn't run on RI."] }, { + "tests": ["121-modifiers", + "1929-exception-catch-exception"], + "variant": "jvm", + "bug": "b/76399183", + "description": ["New failures to be investigated."] + }, + { "tests": ["616-cha-unloading"], "variant": "trace", "description": ["Trace prevents class unloading."] diff --git a/test/run-test b/test/run-test index 260a65a056..5b43b52b41 100755 --- a/test/run-test +++ b/test/run-test @@ -50,11 +50,18 @@ export USE_JACK="false" export USE_DESUGAR="true" export SMALI_ARGS="" +# If d8 was not set by the environment variable, assume it is in the path. +if [ -z "$D8" ]; then + export D8="d8" +fi + # If dx was not set by the environment variable, assume it is in the path. if [ -z "$DX" ]; then export DX="dx" fi +export DXMERGER="$D8" + # If jasmin was not set by the environment variable, assume it is in the path. if [ -z "$JASMIN" ]; then export JASMIN="jasmin" @@ -65,11 +72,6 @@ if [ -z "$SMALI" ]; then export SMALI="smali" fi -# If dexmerger was not set by the environment variable, assume it is in the path. -if [ -z "$DXMERGER" ]; then - export DXMERGER="dexmerger" -fi - # If jack was not set by the environment variable, assume it is in the path. if [ -z "$JACK" ]; then export JACK="jack" diff --git a/test/testrunner/env.py b/test/testrunner/env.py index 70efce51ee..539499173c 100644 --- a/test/testrunner/env.py +++ b/test/testrunner/env.py @@ -136,9 +136,8 @@ HOST_OUT_EXECUTABLES = os.path.join(ANDROID_BUILD_TOP, _get_build_var("HOST_OUT_EXECUTABLES")) # Set up default values for $JACK, $DX, $SMALI, etc to the $HOST_OUT_EXECUTABLES/$name path. 
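+# The loop below now also seeds a default for $D8; test/run-test aliases $DXMERGER +# to $D8, so the dex merging step is performed by d8.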
-for tool in ['jack', 'dx', 'smali', 'jasmin', 'dxmerger']: - binary = tool if tool != 'dxmerger' else 'dexmerger' - os.environ.setdefault(tool.upper(), HOST_OUT_EXECUTABLES + '/' + binary) +for tool in ['jack', 'dx', 'smali', 'jasmin', 'd8']: + os.environ.setdefault(tool.upper(), HOST_OUT_EXECUTABLES + '/' + tool) ANDROID_JAVA_TOOLCHAIN = os.path.join(ANDROID_BUILD_TOP, _get_build_var('ANDROID_JAVA_TOOLCHAIN')) diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py index a2215f9e9b..734a600c5e 100755 --- a/test/testrunner/testrunner.py +++ b/test/testrunner/testrunner.py @@ -114,6 +114,7 @@ ignore_skips = False build = False gdb = False gdb_arg = '' +runtime_option = '' stop_testrunner = False dex2oat_jobs = -1 # -1 corresponds to default threads for dex2oat run_all_configs = False @@ -346,6 +347,10 @@ def run_tests(tests): if gdb_arg: options_all += ' --gdb-arg ' + gdb_arg + if runtime_option: + for opt in runtime_option: + options_all += ' --runtime-option ' + opt + if dex2oat_jobs != -1: options_all += ' --dex2oat-jobs ' + str(dex2oat_jobs) @@ -921,6 +926,7 @@ def parse_option(): global build global gdb global gdb_arg + global runtime_option global timeout global dex2oat_jobs global run_all_configs @@ -933,9 +939,9 @@ def parse_option(): global_group.add_argument('--timeout', default=timeout, type=int, dest='timeout') global_group.add_argument('--verbose', '-v', action='store_true', dest='verbose') global_group.add_argument('--dry-run', action='store_true', dest='dry_run') - global_group.add_argument("--skip", action="append", dest="skips", default=[], + global_group.add_argument("--skip", action='append', dest="skips", default=[], help="Skip the given test in all circumstances.") - global_group.add_argument("--no-skips", dest="ignore_skips", action="store_true", default=False, + global_group.add_argument("--no-skips", dest="ignore_skips", action='store_true', default=False, help="""Don't skip any run-test configurations listed in knownfailures.json.""") global_group.add_argument('--no-build-dependencies', @@ -950,6 +956,10 @@ def parse_option(): global_group.set_defaults(build = env.ART_TEST_RUN_TEST_BUILD) global_group.add_argument('--gdb', action='store_true', dest='gdb') global_group.add_argument('--gdb-arg', dest='gdb_arg') + global_group.add_argument('--runtime-option', action='append', dest='runtime_option', + help="""Pass an option to the runtime. Runtime options + starting with a '-' must be separated by a '=', for + example '--runtime-option=-Xjitthreshold:0'.""") global_group.add_argument('--dex2oat-jobs', type=int, dest='dex2oat_jobs', help='Number of dex2oat jobs') global_group.add_argument('-a', '--all', action='store_true', dest='run_all', @@ -993,6 +1003,7 @@ def parse_option(): gdb = True if options['gdb_arg']: gdb_arg = options['gdb_arg'] + runtime_option = options['runtime_option'] timeout = options['timeout'] if options['dex2oat_jobs']: dex2oat_jobs = options['dex2oat_jobs'] diff --git a/tools/ahat/Android.bp b/tools/ahat/Android.bp new file mode 100644 index 0000000000..dc9f098ff3 --- /dev/null +++ b/tools/ahat/Android.bp @@ -0,0 +1,25 @@ +// Copyright 2018 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +droiddoc_host { + name: "ahat-docs", + srcs: [ + "src/main/**/*.java", + ], + custom_template: "droiddoc-templates-sdk", + args: "-stubpackages com.android.ahat:com.android.ahat.*", + api_tag_name: "AHAT", + api_filename: "ahat_api.txt", + removed_api_filename: "ahat_removed_api.txt", +} diff --git a/tools/ahat/Android.mk b/tools/ahat/Android.mk index bf79751659..ad33233159 100644 --- a/tools/ahat/Android.mk +++ b/tools/ahat/Android.mk @@ -37,23 +37,10 @@ LOCAL_COMPATIBILITY_SUITE := general-tests include $(BUILD_HOST_JAVA_LIBRARY) AHAT_JAR := $(LOCAL_BUILT_MODULE) -AHAT_API := $(intermediates.COMMON)/ahat_api.txt -AHAT_REMOVED_API := $(intermediates.COMMON)/ahat_removed_api.txt # --- api check for ahat.jar ---------- -include $(CLEAR_VARS) -LOCAL_SRC_FILES := $(call all-java-files-under, src/main) -LOCAL_IS_HOST_MODULE := true -LOCAL_MODULE_TAGS := optional -LOCAL_MODULE_CLASS := JAVA_LIBRARIES -LOCAL_MODULE := ahat -LOCAL_DROIDDOC_OPTIONS := \ - -stubpackages com.android.ahat:com.android.ahat.* \ - -api $(AHAT_API) \ - -removedApi $(AHAT_REMOVED_API) -LOCAL_DROIDDOC_CUSTOM_TEMPLATE_DIR := external/doclava/res/assets/templates-sdk -include $(BUILD_DROIDDOC) -$(AHAT_API): $(full_target) +AHAT_API := $(INTERNAL_PLATFORM_AHAT_API_FILE) +AHAT_REMOVED_API := $(INTERNAL_PLATFORM_AHAT_REMOVED_API_FILE) $(eval $(call check-api, \ ahat-check-api, \ diff --git a/tools/veridex/Android.bp b/tools/veridex/Android.bp index cac441aaf0..31ff682828 100644 --- a/tools/veridex/Android.bp +++ b/tools/veridex/Android.bp @@ -15,7 +15,10 @@ art_cc_binary { name: "veridex", host_supported: true, - srcs: ["veridex.cc"], + srcs: [ + "resolver.cc", + "veridex.cc", + ], cflags: ["-Wall", "-Werror"], shared_libs: ["libdexfile", "libbase"], header_libs: [ diff --git a/tools/veridex/resolver.cc b/tools/veridex/resolver.cc new file mode 100644 index 0000000000..82978215a6 --- /dev/null +++ b/tools/veridex/resolver.cc @@ -0,0 +1,299 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "resolver.h" + +#include <cstring> + +#include "dex/dex_file-inl.h" +#include "dex/primitive.h" +#include "veridex.h" + +namespace art { + +void VeridexResolver::Run() { + size_t class_def_count = dex_file_.NumClassDefs(); + for (size_t class_def_index = 0; class_def_index < class_def_count; ++class_def_index) { + const DexFile::ClassDef& class_def = dex_file_.GetClassDef(class_def_index); + std::string name(dex_file_.StringByTypeIdx(class_def.class_idx_)); + auto existing = type_map_.find(name); + if (existing != type_map_.end()) { + // The class was already defined in a previously loaded dex file; cache that + // definition locally and move on. + type_infos_[class_def.class_idx_.index_] = *existing->second; + continue; + } + type_infos_[class_def.class_idx_.index_] = VeriClass(Primitive::Type::kPrimNot, 0, &class_def); + type_map_[name] = &(type_infos_[class_def.class_idx_.index_]); + + const uint8_t* class_data = dex_file_.GetClassData(class_def); + if (class_data == nullptr) { + // Empty class. + continue; + } + + ClassDataItemIterator it(dex_file_, class_data); + for (; it.HasNextStaticField(); it.Next()) { + field_infos_[it.GetMemberIndex()] = it.DataPointer(); + } + for (; it.HasNextInstanceField(); it.Next()) { + field_infos_[it.GetMemberIndex()] = it.DataPointer(); + } + for (; it.HasNextMethod(); it.Next()) { + method_infos_[it.GetMemberIndex()] = it.DataPointer(); + } + } +} + +static bool HasSameNameAndSignature(const DexFile& dex_file, + const DexFile::MethodId& method_id, + const char* method_name, + const Signature& signature) { + return strcmp(method_name, dex_file.GetMethodName(method_id)) == 0 && + dex_file.GetMethodSignature(method_id) == signature; +} + +static bool HasSameNameAndType(const DexFile& dex_file, + const DexFile::FieldId& field_id, + const char* field_name, + const char* field_type) { + return strcmp(field_name, dex_file.GetFieldName(field_id)) == 0 && + strcmp(field_type, dex_file.GetFieldTypeDescriptor(field_id)) == 0; +} + +VeriClass* VeridexResolver::GetVeriClass(dex::TypeIndex index) { + CHECK_LT(index.index_, dex_file_.NumTypeIds()); + // Look it up in our local cache first. + VeriClass* cls = &type_infos_[index.index_]; + if (cls->IsUninitialized()) { + // The class is defined in another dex file. Look it up in the global cache. + std::string name(dex_file_.StringByTypeIdx(index)); + auto existing = type_map_.find(name); + if (existing == type_map_.end()) { + // Class hasn't been defined, so check if it's an array class. + size_t last_array = name.find_last_of('['); + if (last_array == std::string::npos) { + // There is no such class. + return nullptr; + } else { + // Class is an array class. Check if its innermost component type (which is not + // itself an array class) has been defined. + std::string klass_name = name.substr(last_array + 1); + existing = type_map_.find(klass_name); + if (existing == type_map_.end()) { + // There is no such class, so there is no such array. + return nullptr; + } else { + // Create the type, and cache it locally and globally. + type_infos_[index.index_] = VeriClass( + existing->second->GetKind(), last_array + 1, existing->second->GetClassDef()); + cls = &(type_infos_[index.index_]); + type_map_[name] = cls; + } + } + } else { + // Cache the found class. + cls = existing->second; + type_infos_[index.index_] = *cls; + } + } + return cls; +} + +VeridexResolver* VeridexResolver::GetResolverOf(const VeriClass& kls) const { + auto resolver_it = dex_resolvers_.lower_bound(reinterpret_cast<uintptr_t>(kls.GetClassDef())); + --resolver_it; + + // Check that the class def pointer is indeed in the mapped dex file's range.
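+ // `dex_resolvers_` is keyed by each dex file's start address (DexFile::Begin()), and a + // class def lives strictly above its file's start address, so lower_bound on the class + // def address followed by the decrement above lands on the resolver whose dex file + // starts closest below that address; the checks below assert the pointer is in range.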
+ const DexFile& dex_file = resolver_it->second->dex_file_; + CHECK_LT(reinterpret_cast<uintptr_t>(dex_file.Begin()), + reinterpret_cast<uintptr_t>(kls.GetClassDef())); + CHECK_GT(reinterpret_cast<uintptr_t>(dex_file.Begin()) + dex_file.Size(), + reinterpret_cast<uintptr_t>(kls.GetClassDef())); + return resolver_it->second; +} + +VeriMethod VeridexResolver::LookupMethodIn(const VeriClass& kls, + const char* method_name, + const Signature& method_signature) { + if (kls.IsPrimitive()) { + // Primitive classes don't have methods. + return nullptr; + } + if (kls.IsArray()) { + // Array classes don't have methods, but inherit the ones in j.l.Object. + return LookupMethodIn(*VeriClass::object_, method_name, method_signature); + } + // Get the resolver where `kls` is from. + VeridexResolver* resolver = GetResolverOf(kls); + + // Look at methods declared in `kls`. + const DexFile& other_dex_file = resolver->dex_file_; + const uint8_t* class_data = other_dex_file.GetClassData(*kls.GetClassDef()); + if (class_data != nullptr) { + ClassDataItemIterator it(other_dex_file, class_data); + it.SkipAllFields(); + for (; it.HasNextMethod(); it.Next()) { + const DexFile::MethodId& other_method_id = other_dex_file.GetMethodId(it.GetMemberIndex()); + if (HasSameNameAndSignature(other_dex_file, + other_method_id, + method_name, + method_signature)) { + return it.DataPointer(); + } + } + } + + // Look at methods in `kls`'s super class hierarchy. + if (kls.GetClassDef()->superclass_idx_.IsValid()) { + VeriClass* super = resolver->GetVeriClass(kls.GetClassDef()->superclass_idx_); + if (super != nullptr) { + VeriMethod super_method = resolver->LookupMethodIn(*super, method_name, method_signature); + if (super_method != nullptr) { + return super_method; + } + } + } + + // Look at methods in `kls`'s interface hierarchy. + const DexFile::TypeList* interfaces = other_dex_file.GetInterfacesList(*kls.GetClassDef()); + if (interfaces != nullptr) { + for (size_t i = 0; i < interfaces->Size(); i++) { + dex::TypeIndex idx = interfaces->GetTypeItem(i).type_idx_; + VeriClass* itf = resolver->GetVeriClass(idx); + if (itf != nullptr) { + VeriMethod itf_method = resolver->LookupMethodIn(*itf, method_name, method_signature); + if (itf_method != nullptr) { + return itf_method; + } + } + } + } + return nullptr; +} + +VeriField VeridexResolver::LookupFieldIn(const VeriClass& kls, + const char* field_name, + const char* field_type) { + if (kls.IsPrimitive()) { + // Primitive classes don't have fields. + return nullptr; + } + if (kls.IsArray()) { + // Array classes don't have fields. + return nullptr; + } + // Get the resolver where `kls` is from. + VeridexResolver* resolver = GetResolverOf(kls); + + // Look at fields declared in `kls`. + const DexFile& other_dex_file = resolver->dex_file_; + const uint8_t* class_data = other_dex_file.GetClassData(*kls.GetClassDef()); + if (class_data != nullptr) { + ClassDataItemIterator it(other_dex_file, class_data); + for (; it.HasNextStaticField() || it.HasNextInstanceField(); it.Next()) { + const DexFile::FieldId& other_field_id = other_dex_file.GetFieldId(it.GetMemberIndex()); + if (HasSameNameAndType(other_dex_file, + other_field_id, + field_name, + field_type)) { + return it.DataPointer(); + } + } + } + + // Look at fields in `kls`'s interface hierarchy. 
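+ // Note: field lookup visits the interface hierarchy before the superclass hierarchy, + // while the method lookup above visits the superclass first; this mirrors the + // JLS/JVMS resolution order for fields and methods respectively.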
+ const DexFile::TypeList* interfaces = other_dex_file.GetInterfacesList(*kls.GetClassDef()); + if (interfaces != nullptr) { + for (size_t i = 0; i < interfaces->Size(); i++) { + dex::TypeIndex idx = interfaces->GetTypeItem(i).type_idx_; + VeriClass* itf = resolver->GetVeriClass(idx); + if (itf != nullptr) { + VeriField itf_field = resolver->LookupFieldIn(*itf, field_name, field_type); + if (itf_field != nullptr) { + return itf_field; + } + } + } + } + + // Look at fields in `kls`'s super class hierarchy. + if (kls.GetClassDef()->superclass_idx_.IsValid()) { + VeriClass* super = resolver->GetVeriClass(kls.GetClassDef()->superclass_idx_); + if (super != nullptr) { + VeriField super_field = resolver->LookupFieldIn(*super, field_name, field_type); + if (super_field != nullptr) { + return super_field; + } + } + } + return nullptr; +} + +VeriMethod VeridexResolver::GetMethod(uint32_t method_index) { + VeriMethod method_info = method_infos_[method_index]; + if (method_info == nullptr) { + // Method is defined in another dex file. + const DexFile::MethodId& method_id = dex_file_.GetMethodId(method_index); + VeriClass* kls = GetVeriClass(method_id.class_idx_); + if (kls == nullptr) { + return nullptr; + } + // Class found, now look up the method in it. + method_info = LookupMethodIn(*kls, + dex_file_.GetMethodName(method_id), + dex_file_.GetMethodSignature(method_id)); + method_infos_[method_index] = method_info; + } + return method_info; +} + +VeriField VeridexResolver::GetField(uint32_t field_index) { + VeriField field_info = field_infos_[field_index]; + if (field_info == nullptr) { + // Field is defined in another dex file. + const DexFile::FieldId& field_id = dex_file_.GetFieldId(field_index); + VeriClass* kls = GetVeriClass(field_id.class_idx_); + if (kls == nullptr) { + return nullptr; + } + // Class found, now look up the field in it. + field_info = LookupFieldIn(*kls, + dex_file_.GetFieldName(field_id), + dex_file_.GetFieldTypeDescriptor(field_id)); + field_infos_[field_index] = field_info; + } + return field_info; +} + +void VeridexResolver::ResolveAll() { + for (uint32_t i = 0; i < dex_file_.NumTypeIds(); ++i) { + if (GetVeriClass(dex::TypeIndex(i)) == nullptr) { + LOG(WARNING) << "Unresolved: " << dex_file_.PrettyType(dex::TypeIndex(i)); + } + } + + for (uint32_t i = 0; i < dex_file_.NumMethodIds(); ++i) { + if (GetMethod(i) == nullptr) { + LOG(WARNING) << "Unresolved: " << dex_file_.PrettyMethod(i); + } + } + + for (uint32_t i = 0; i < dex_file_.NumFieldIds(); ++i) { + if (GetField(i) == nullptr) { + LOG(WARNING) << "Unresolved: " << dex_file_.PrettyField(i); + } + } +} + +} // namespace art diff --git a/tools/veridex/resolver.h b/tools/veridex/resolver.h new file mode 100644 index 0000000000..ae94dadb28 --- /dev/null +++ b/tools/veridex/resolver.h @@ -0,0 +1,85 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
+ */ + +#ifndef ART_TOOLS_VERIDEX_RESOLVER_H_ +#define ART_TOOLS_VERIDEX_RESOLVER_H_ + +#include <vector> + +#include "dex/dex_file.h" +#include "veridex.h" + +namespace art { + +class VeridexResolver; + +/** + * Map from the start of a dex file (i.e., DexFile::Begin()) to + * its corresponding resolver. + */ +using DexResolverMap = std::map<uintptr_t, VeridexResolver*>; + +class VeridexResolver { + public: + VeridexResolver(const DexFile& dex_file, + const DexResolverMap& dex_resolvers, + TypeMap& type_map) + : dex_file_(dex_file), + type_map_(type_map), + dex_resolvers_(dex_resolvers), + type_infos_(dex_file.NumTypeIds(), VeriClass()), + method_infos_(dex_file.NumMethodIds(), nullptr), + field_infos_(dex_file.NumFieldIds(), nullptr) {} + + // Scan the classes defined in this dex file and populate our + // local type cache. + void Run(); + + // Return the class declared at `index`. + VeriClass* GetVeriClass(dex::TypeIndex index); + + // Return the method declared at `method_index`. + VeriMethod GetMethod(uint32_t method_index); + + // Return the field declared at `field_index`. + VeriField GetField(uint32_t field_index); + + // Do a JLS lookup in `kls` to find a method. + VeriMethod LookupMethodIn(const VeriClass& kls, + const char* method_name, + const Signature& method_signature); + + // Do a JLS lookup in `kls` to find a field. + VeriField LookupFieldIn(const VeriClass& kls, + const char* field_name, + const char* field_type); + + // Resolve all type_id/method_id/field_id references. + void ResolveAll(); + + private: + // Return the resolver where `kls` is from. + VeridexResolver* GetResolverOf(const VeriClass& kls) const; + + const DexFile& dex_file_; + TypeMap& type_map_; + const DexResolverMap& dex_resolvers_; + std::vector<VeriClass> type_infos_; + std::vector<VeriMethod> method_infos_; + std::vector<VeriField> field_infos_; +}; + +} // namespace art + +#endif // ART_TOOLS_VERIDEX_RESOLVER_H_ diff --git a/tools/veridex/veridex.cc b/tools/veridex/veridex.cc index 9d0dd36019..9287211a3c 100644 --- a/tools/veridex/veridex.cc +++ b/tools/veridex/veridex.cc @@ -14,15 +14,40 @@ * limitations under the License. */ +#include "veridex.h" + #include <android-base/file.h> #include "dex/dex_file.h" #include "dex/dex_file_loader.h" +#include "resolver.h" #include <sstream> namespace art { +static VeriClass z_(Primitive::Type::kPrimBoolean, 0, nullptr); +static VeriClass b_(Primitive::Type::kPrimByte, 0, nullptr); +static VeriClass c_(Primitive::Type::kPrimChar, 0, nullptr); +static VeriClass s_(Primitive::Type::kPrimShort, 0, nullptr); +static VeriClass i_(Primitive::Type::kPrimInt, 0, nullptr); +static VeriClass f_(Primitive::Type::kPrimFloat, 0, nullptr); +static VeriClass d_(Primitive::Type::kPrimDouble, 0, nullptr); +static VeriClass j_(Primitive::Type::kPrimLong, 0, nullptr); +static VeriClass v_(Primitive::Type::kPrimVoid, 0, nullptr); + +VeriClass* VeriClass::boolean_ = &z_; +VeriClass* VeriClass::byte_ = &b_; +VeriClass* VeriClass::char_ = &c_; +VeriClass* VeriClass::short_ = &s_; +VeriClass* VeriClass::integer_ = &i_; +VeriClass* VeriClass::float_ = &f_; +VeriClass* VeriClass::double_ = &d_; +VeriClass* VeriClass::long_ = &j_; +VeriClass* VeriClass::void_ = &v_; +// Will be set after boot classpath has been resolved. +VeriClass* VeriClass::object_ = nullptr; + struct VeridexOptions { const char* dex_file = nullptr; const char* core_stubs = nullptr; @@ -108,6 +133,39 @@ class Veridex { return 1; } } + + // Resolve classes/methods/fields defined in each dex file.
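+ // All resolvers are created and registered in `resolver_map` before any of them + // populates the type map (see Resolve() below), so references across dex files + // resolve regardless of the order in which the dex files are listed.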
+ + // Cache of types we've seen, for quick class name lookups. + TypeMap type_map; + // Add internally defined primitives. + type_map["Z"] = VeriClass::boolean_; + type_map["B"] = VeriClass::byte_; + type_map["S"] = VeriClass::short_; + type_map["C"] = VeriClass::char_; + type_map["I"] = VeriClass::integer_; + type_map["F"] = VeriClass::float_; + type_map["D"] = VeriClass::double_; + type_map["J"] = VeriClass::long_; + type_map["V"] = VeriClass::void_; + + // Cache of resolvers, to easily map an address in memory to its VeridexResolver. + DexResolverMap resolver_map; + + std::vector<std::unique_ptr<VeridexResolver>> boot_resolvers; + Resolve(boot_dex_files, resolver_map, type_map, &boot_resolvers); + + // Now that the boot classpath has been resolved, set j.l.Object. + VeriClass::object_ = type_map["Ljava/lang/Object;"]; + + std::vector<std::unique_ptr<VeridexResolver>> app_resolvers; + Resolve(app_dex_files, resolver_map, type_map, &app_resolvers); + + // Resolve all type_id/method_id/field_id references of the app dex files. + for (const std::unique_ptr<VeridexResolver>& resolver : app_resolvers) { + resolver->ResolveAll(); + } + return 0; } @@ -142,6 +200,22 @@ class Veridex { return true; } + + static void Resolve(const std::vector<std::unique_ptr<const DexFile>>& dex_files, + DexResolverMap& resolver_map, + TypeMap& type_map, + std::vector<std::unique_ptr<VeridexResolver>>* resolvers) { + for (const std::unique_ptr<const DexFile>& dex_file : dex_files) { + VeridexResolver* resolver = + new VeridexResolver(*dex_file.get(), resolver_map, type_map); + resolvers->emplace_back(resolver); + resolver_map[reinterpret_cast<uintptr_t>(dex_file->Begin())] = resolver; + } + + for (const std::unique_ptr<VeridexResolver>& resolver : *resolvers) { + resolver->Run(); + } + } }; } // namespace art diff --git a/tools/veridex/veridex.h b/tools/veridex/veridex.h new file mode 100644 index 0000000000..0c928ab166 --- /dev/null +++ b/tools/veridex/veridex.h @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2018 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_TOOLS_VERIDEX_VERIDEX_H_ +#define ART_TOOLS_VERIDEX_VERIDEX_H_ + +#include <map> +#include <string> + +#include "dex/dex_file.h" +#include "dex/primitive.h" + +namespace art { + +/** + * Abstraction for classes defined, or implicitly defined (for arrays and primitives), + * in dex files.
+ */ +class VeriClass { + public: + VeriClass(const VeriClass& other) = default; + VeriClass() = default; + VeriClass(Primitive::Type k, uint8_t dims, const DexFile::ClassDef* cl) + : kind_(k), dimensions_(dims), class_def_(cl) {} + + bool IsUninitialized() const { + return kind_ == Primitive::Type::kPrimNot && dimensions_ == 0 && class_def_ == nullptr; + } + + bool IsPrimitive() const { + return kind_ != Primitive::Type::kPrimNot && dimensions_ == 0; + } + + bool IsArray() const { + return dimensions_ != 0; + } + + Primitive::Type GetKind() const { return kind_; } + uint8_t GetDimensions() const { return dimensions_; } + const DexFile::ClassDef* GetClassDef() const { return class_def_; } + + static VeriClass* object_; + static VeriClass* boolean_; + static VeriClass* byte_; + static VeriClass* char_; + static VeriClass* short_; + static VeriClass* integer_; + static VeriClass* float_; + static VeriClass* double_; + static VeriClass* long_; + static VeriClass* void_; + + private: + // Default-initialize the members so that a default-constructed instance is + // recognized by IsUninitialized() instead of reading indeterminate values. + Primitive::Type kind_ = Primitive::Type::kPrimNot; + uint8_t dimensions_ = 0; + const DexFile::ClassDef* class_def_ = nullptr; +}; + +/** + * Abstraction for fields defined in dex files. Currently, that's a pointer into their + * `encoded_field` description. + */ +using VeriField = const uint8_t*; + +/** + * Abstraction for methods defined in dex files. Currently, that's a pointer into their + * `encoded_method` description. + */ +using VeriMethod = const uint8_t*; + +/** + * Map from class name to VeriClass to quickly look up classes. + */ +using TypeMap = std::map<std::string, VeriClass*>; + +} // namespace art + +#endif // ART_TOOLS_VERIDEX_VERIDEX_H_
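Editor's note: to illustrate the descriptor-keyed TypeMap and the array handling in VeridexResolver::GetVeriClass above, here is a minimal, self-contained C++ sketch. The toy types and names are hypothetical and not part of the patch; it only demonstrates the idea that an array descriptor such as "[[I" is resolved by locating its last '[', looking up the innermost component type, and caching the result with its dimension count.

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>

    // Toy stand-in for VeriClass: a component kind plus array dimensions.
    struct ToyClass {
      std::string kind;    // e.g. "int" or a class name
      uint8_t dimensions;  // number of leading '[' in the descriptor
    };

    // Descriptor-keyed map, in the spirit of TypeMap in veridex.h.
    using ToyTypeMap = std::map<std::string, ToyClass>;

    // Resolve a descriptor the way GetVeriClass handles arrays: find the last
    // '[', look up the innermost component type, and cache the array type.
    std::optional<ToyClass> Resolve(ToyTypeMap& type_map, const std::string& name) {
      auto it = type_map.find(name);
      if (it != type_map.end()) return it->second;
      size_t last_array = name.find_last_of('[');
      if (last_array == std::string::npos) return std::nullopt;  // Unknown class.
      auto component = type_map.find(name.substr(last_array + 1));
      if (component == type_map.end()) return std::nullopt;  // Unknown component.
      ToyClass cls{component->second.kind, static_cast<uint8_t>(last_array + 1)};
      type_map[name] = cls;  // Cache the array type, as the resolver does.
      return cls;
    }

    int main() {
      ToyTypeMap type_map;
      type_map["I"] = {"int", 0};
      type_map["Ljava/lang/Object;"] = {"java.lang.Object", 0};
      if (auto r = Resolve(type_map, "[[I")) {
        std::cout << r->kind << ", dimensions=" << int(r->dimensions) << "\n";
      }
    }

This prints "int, dimensions=2". The real resolver additionally distinguishes primitive kinds via Primitive::Type and keeps a ClassDef pointer for reference types, so the innermost component's class definition is shared by all of its array types.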