-rw-r--r--  Android.mk | 14
-rw-r--r--  adbconnection/adbconnection.cc | 19
-rw-r--r--  compiler/compiled_method.cc | 6
-rw-r--r--  compiler/compiled_method.h | 6
-rw-r--r--  compiler/debug/elf_symtab_writer.h | 29
-rw-r--r--  compiler/dex/dex_to_dex_compiler.cc | 206
-rw-r--r--  compiler/dex/dex_to_dex_compiler.h | 97
-rw-r--r--  compiler/driver/compiler_driver.cc | 528
-rw-r--r--  compiler/driver/compiler_driver.h | 13
-rw-r--r--  compiler/jit/jit_compiler.cc | 2
-rw-r--r--  compiler/linker/elf_builder.h | 20
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc | 17
-rw-r--r--  compiler/optimizing/code_generator_mips.cc | 66
-rw-r--r--  compiler/optimizing/code_generator_mips64.cc | 66
-rw-r--r--  compiler/optimizing/dead_code_elimination.cc | 64
-rw-r--r--  compiler/optimizing/load_store_elimination.cc | 381
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 2
-rw-r--r--  compiler/utils/atomic_dex_ref_map-inl.h | 12
-rw-r--r--  compiler/utils/atomic_dex_ref_map.h | 3
-rw-r--r--  dex2oat/linker/oat_writer.cc | 7
-rw-r--r--  dex2oat/linker/oat_writer.h | 4
-rw-r--r--  dexdump/dexdump.cc | 10
-rw-r--r--  dexlayout/compact_dex_writer.cc | 18
-rw-r--r--  dexlayout/compact_dex_writer.h | 5
-rw-r--r--  dexlayout/dexlayout.h | 3
-rw-r--r--  dexlayout/dexlayout_test.cc | 82
-rw-r--r--  openjdkjvmti/OpenjdkJvmTi.cc | 1
-rw-r--r--  openjdkjvmti/fixed_up_dex_file.cc | 7
-rw-r--r--  openjdkjvmti/fixed_up_dex_file.h | 3
-rw-r--r--  openjdkjvmti/ti_class_definition.cc | 93
-rw-r--r--  openjdkjvmti/ti_class_definition.h | 35
-rw-r--r--  openjdkjvmti/transform.cc | 182
-rw-r--r--  openjdkjvmti/transform.h | 2
-rw-r--r--  profman/profile_assistant.cc | 21
-rw-r--r--  profman/profile_assistant.h | 11
-rw-r--r--  profman/profile_assistant_test.cc | 108
-rw-r--r--  profman/profman.cc | 136
-rw-r--r--  runtime/arch/mips64/quick_entrypoints_mips64.S | 3
-rw-r--r--  runtime/arch/stub_test.cc | 10
-rw-r--r--  runtime/base/array_ref.h | 6
-rw-r--r--  runtime/base/mutex.cc | 6
-rw-r--r--  runtime/base/mutex.h | 7
-rw-r--r--  runtime/class_linker.cc | 18
-rw-r--r--  runtime/class_linker.h | 1
-rw-r--r--  runtime/dex/compact_dex_file.h | 6
-rw-r--r--  runtime/dex/dex_file.h | 7
-rw-r--r--  runtime/dex/dex_file_layout.h | 2
-rw-r--r--  runtime/dex/standard_dex_file.h | 4
-rw-r--r--  runtime/fault_handler.cc | 26
-rw-r--r--  runtime/gc/allocator/dlmalloc.cc | 1
-rw-r--r--  runtime/gc/allocator/dlmalloc.h | 1
-rw-r--r--  runtime/gc/heap-inl.h | 11
-rw-r--r--  runtime/gc/heap.cc | 15
-rw-r--r--  runtime/gc/heap_test.cc | 1
-rw-r--r--  runtime/hidden_api.h | 22
-rw-r--r--  runtime/instrumentation.cc | 4
-rw-r--r--  runtime/interpreter/interpreter_common.cc | 7
-rw-r--r--  runtime/jit/debugger_interface.cc | 62
-rw-r--r--  runtime/jit/debugger_interface.h | 20
-rw-r--r--  runtime/jit/jit_code_cache.cc | 4
-rw-r--r--  runtime/mem_map.cc | 115
-rw-r--r--  runtime/mem_map.h | 33
-rw-r--r--  runtime/mem_map_test.cc | 200
-rw-r--r--  runtime/native/dalvik_system_DexFile.cc | 2
-rw-r--r--  runtime/native/dalvik_system_ZygoteHooks.cc | 3
-rw-r--r--  runtime/native_stack_dump.cc | 4
-rw-r--r--  runtime/quicken_info.h | 16
-rw-r--r--  runtime/runtime.cc | 15
-rw-r--r--  runtime/runtime.h | 13
-rw-r--r--  runtime/vdex_file.cc | 3
-rw-r--r--  test/137-cfi/cfi.cc | 4
-rw-r--r--  test/137-cfi/src-multidex/Base.java | 22
-rw-r--r--  test/137-cfi/src/Main.java | 6
-rw-r--r--  test/530-checker-lse/src/Main.java | 175
-rw-r--r--  test/530-regression-lse/expected.txt | 0
-rw-r--r--  test/530-regression-lse/info.txt | 2
-rw-r--r--  test/530-regression-lse/src/Main.java | 55
-rw-r--r--  test/603-checker-instanceof/src/Main.java | 2
-rw-r--r--  test/608-checker-unresolved-lse/src/Main.java | 1
-rw-r--r--  test/639-checker-code-sinking/expected.txt | 2
-rw-r--r--  test/639-checker-code-sinking/src/Main.java | 3
-rw-r--r--  test/672-checker-throw-method/src/Main.java | 72
-rw-r--r--  test/674-hiddenapi/hiddenapi.cc | 6
-rw-r--r--  test/710-varhandle-creation/src-art/Main.java | 24
-rw-r--r--  test/714-invoke-custom-lambda-metafactory/build | 22
-rw-r--r--  test/714-invoke-custom-lambda-metafactory/expected.txt | 4
-rw-r--r--  test/714-invoke-custom-lambda-metafactory/info.txt | 1
-rw-r--r--  test/714-invoke-custom-lambda-metafactory/src/Main.java | 32
-rw-r--r--  test/956-methodhandles/src/Main.java | 18
-rw-r--r--  test/983-source-transform-verify/source_transform.cc | 8
-rw-r--r--  test/ManyMethods/ManyMethods.java | 18
-rw-r--r--  test/common/runtime_state.cc | 9
-rwxr-xr-x  test/etc/run-test-jar | 2
-rw-r--r--  test/knownfailures.json | 12
-rw-r--r--  test/testrunner/target_config.py | 24
-rwxr-xr-x  test/testrunner/testrunner.py | 4
-rw-r--r--  test/ti-stress/stress.cc | 2
-rw-r--r--  test/valgrind-suppressions.txt | 10
-rwxr-xr-x  tools/buildbot-build.sh | 9
-rw-r--r--  tools/external_oj_libjdwp_art_failures.txt | 7
-rw-r--r--  tools/prebuilt_libjdwp_art_failures.txt | 14
101 files changed, 2649 insertions(+), 848 deletions(-)
diff --git a/Android.mk b/Android.mk
index 0a90a0bb24..361ceecc2f 100644
--- a/Android.mk
+++ b/Android.mk
@@ -447,6 +447,19 @@ LOCAL_REQUIRED_MODULES := libopenjdkd
include $(BUILD_PHONY_PACKAGE)
endif
+# Create dummy hidden API lists which are normally generated by the framework
+# but which we do not have in the master-art manifest.
+# We need to execute this now to ensure Makefile rules depending on these files can
+# be constructed.
+define build-art-hiddenapi
+$(shell if [ ! -d frameworks/base ]; then \
+ mkdir -p ${TARGET_OUT_COMMON_INTERMEDIATES}/PACKAGING; \
+ touch ${TARGET_OUT_COMMON_INTERMEDIATES}/PACKAGING/hiddenapi-{blacklist,dark-greylist,light-greylist}.txt; \
+ fi;)
+endef
+
+$(eval $(call build-art-hiddenapi))
+
########################################################################
# "m build-art" for quick minimal build
.PHONY: build-art
@@ -460,6 +473,7 @@ build-art-target: $(TARGET_OUT_EXECUTABLES)/art $(ART_TARGET_DEPENDENCIES) $(TAR
########################################################################
# Phony target for only building what go/lem requires for pushing ART on /data.
+
.PHONY: build-art-target-golem
# Also include libartbenchmark, we always include it when running golem.
# libstdc++ is needed when building for ART_TARGET_LINUX.
diff --git a/adbconnection/adbconnection.cc b/adbconnection/adbconnection.cc
index 634d6b56df..a0c99663b4 100644
--- a/adbconnection/adbconnection.cc
+++ b/adbconnection/adbconnection.cc
@@ -320,12 +320,12 @@ void AdbConnectionState::SendDdmPacket(uint32_t id,
// Get the write_event early to fail fast.
ScopedEventFdLock lk(adb_write_event_fd_);
if (adb_connection_socket_ == -1) {
- LOG(WARNING) << "Not sending ddms data of type "
- << StringPrintf("%c%c%c%c",
- static_cast<char>(type >> 24),
- static_cast<char>(type >> 16),
- static_cast<char>(type >> 8),
- static_cast<char>(type)) << " due to no connection!";
+ VLOG(jdwp) << "Not sending ddms data of type "
+ << StringPrintf("%c%c%c%c",
+ static_cast<char>(type >> 24),
+ static_cast<char>(type >> 16),
+ static_cast<char>(type >> 8),
+ static_cast<char>(type)) << " due to no connection!";
// Adb is not connected.
return;
}
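
The DDM chunk type logged above is a 32-bit tag packed from four ASCII characters, which is why the message decodes it with four byte-wide shifts. As a standalone illustration (not part of this change), the same decoding can be written as:

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Decodes a 32-bit DDM chunk type into its four ASCII characters, mirroring
    // the StringPrintf("%c%c%c%c", ...) in the log statement above.
    std::string DdmTypeToString(uint32_t type) {
      return std::string({static_cast<char>(type >> 24),
                          static_cast<char>(type >> 16),
                          static_cast<char>(type >> 8),
                          static_cast<char>(type)});
    }

    int main() {
      // 'A', 'P', 'N', 'M' packed big-endian, the way DDM chunk types are encoded.
      uint32_t type = (uint32_t{'A'} << 24) | (uint32_t{'P'} << 16) |
                      (uint32_t{'N'} << 8) | uint32_t{'M'};
      std::cout << DdmTypeToString(type) << std::endl;  // Prints "APNM".
      return 0;
    }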
@@ -531,7 +531,7 @@ bool AdbConnectionState::SetupAdbConnection() {
/* now try to send our pid to the ADB daemon */
ret = TEMP_FAILURE_RETRY(send(sock, buff, sizeof(pid_t), 0));
if (ret == sizeof(pid_t)) {
- LOG(INFO) << "PID " << getpid() << " send to adb";
+ VLOG(jdwp) << "PID " << getpid() << " send to adb";
control_sock_ = std::move(sock);
return true;
} else {
@@ -555,7 +555,10 @@ bool AdbConnectionState::SetupAdbConnection() {
void AdbConnectionState::RunPollLoop(art::Thread* self) {
CHECK_NE(agent_name_, "");
CHECK_EQ(self->GetState(), art::kNative);
- art::Locks::mutator_lock_->AssertNotHeld(self);
+ // TODO: The Clang prebuilt for r316199 produces a bogus thread safety analysis warning when an
+ // exclusive and a shared lock are held in the same scope. Remove the assertion as a temporary workaround.
+ // http://b/71769596
+ // art::Locks::mutator_lock_->AssertNotHeld(self);
self->SetState(art::kWaitingInMainDebuggerLoop);
// shutting_down_ set by StopDebuggerThreads
while (!shutting_down_) {
diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc
index 0f69dbab94..e41371855d 100644
--- a/compiler/compiled_method.cc
+++ b/compiler/compiled_method.cc
@@ -159,10 +159,4 @@ CompiledMethod::~CompiledMethod() {
storage->ReleaseMethodInfo(method_info_);
}
-void CompiledMethod::ReleaseVMapTable() {
- CompiledMethodStorage* storage = GetCompilerDriver()->GetCompiledMethodStorage();
- storage->ReleaseVMapTable(vmap_table_);
- vmap_table_ = nullptr;
-}
-
} // namespace art
diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h
index 4e8f3efe5a..acdce260e5 100644
--- a/compiler/compiled_method.h
+++ b/compiler/compiled_method.h
@@ -168,10 +168,6 @@ class CompiledMethod FINAL : public CompiledCode {
ArrayRef<const linker::LinkerPatch> GetPatches() const;
- // The compiler sometimes unquickens shared code items. In that case, we need to clear the vmap
- // table to avoid writing the quicken info to the vdex file.
- void ReleaseVMapTable();
-
private:
static constexpr size_t kIsIntrinsicLsb = kNumberOfCompiledCodePackedBits;
static constexpr size_t kIsIntrinsicSize = 1u;
@@ -190,7 +186,7 @@ class CompiledMethod FINAL : public CompiledCode {
// For quick code, method specific information that is not very dedupe friendly (method indices).
const LengthPrefixedArray<uint8_t>* const method_info_;
// For quick code, holds code infos which contain stack maps, inline information, and etc.
- const LengthPrefixedArray<uint8_t>* vmap_table_;
+ const LengthPrefixedArray<uint8_t>* const vmap_table_;
// For quick code, a FDE entry for the debug_frame section.
const LengthPrefixedArray<uint8_t>* const cfi_info_;
// For quick code, linker patches needed by the method.
diff --git a/compiler/debug/elf_symtab_writer.h b/compiler/debug/elf_symtab_writer.h
index 9c9e8b35b8..4b19547d28 100644
--- a/compiler/debug/elf_symtab_writer.h
+++ b/compiler/debug/elf_symtab_writer.h
@@ -101,7 +101,7 @@ static void WriteDebugSymbols(linker::ElfBuilder<ElfTypes>* builder,
}
}
}
- // Add symbols for interpreted methods (with address range of the method's bytecode).
+ // Add symbols for dex files.
if (!debug_info.dex_files.empty() && builder->GetDex()->Exists()) {
auto dex = builder->GetDex();
for (auto it : debug_info.dex_files) {
@@ -109,33 +109,6 @@ static void WriteDebugSymbols(linker::ElfBuilder<ElfTypes>* builder,
const DexFile* dex_file = it.second;
typename ElfTypes::Word dex_name = strtab->Write(kDexFileSymbolName);
symtab->Add(dex_name, dex, dex_address, dex_file->Size(), STB_GLOBAL, STT_FUNC);
- if (mini_debug_info) {
- continue; // Don't add interpreter method names to mini-debug-info for now.
- }
- for (uint32_t i = 0; i < dex_file->NumClassDefs(); ++i) {
- const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
- const uint8_t* class_data = dex_file->GetClassData(class_def);
- if (class_data == nullptr) {
- continue;
- }
- for (ClassDataItemIterator item(*dex_file, class_data); item.HasNext(); item.Next()) {
- if (!item.IsAtMethod()) {
- continue;
- }
- const DexFile::CodeItem* code_item = item.GetMethodCodeItem();
- if (code_item == nullptr) {
- continue;
- }
- CodeItemInstructionAccessor code(*dex_file, code_item);
- DCHECK(code.HasCodeItem());
- std::string name = dex_file->PrettyMethod(item.GetMemberIndex(), !mini_debug_info);
- size_t name_offset = strtab->Write(name);
- uint64_t offset = reinterpret_cast<const uint8_t*>(code.Insns()) - dex_file->Begin();
- uint64_t address = dex_address + offset;
- size_t size = code.InsnsSizeInCodeUnits() * sizeof(uint16_t);
- symtab->Add(name_offset, dex, address, size, STB_GLOBAL, STT_FUNC);
- }
- }
}
}
strtab->End();
diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc
index 28c7fe2c34..9f0aaa4e10 100644
--- a/compiler/dex/dex_to_dex_compiler.cc
+++ b/compiler/dex/dex_to_dex_compiler.cc
@@ -45,6 +45,85 @@ const bool kEnableQuickening = true;
// Control check-cast elision.
const bool kEnableCheckCastEllision = true;
+// Holds the state for compiling a single method.
+struct DexToDexCompiler::CompilationState {
+ struct QuickenedInfo {
+ QuickenedInfo(uint32_t pc, uint16_t index) : dex_pc(pc), dex_member_index(index) {}
+
+ uint32_t dex_pc;
+ uint16_t dex_member_index;
+ };
+
+ CompilationState(DexToDexCompiler* compiler,
+ const DexCompilationUnit& unit,
+ const CompilationLevel compilation_level,
+ const std::vector<uint8_t>* quicken_data);
+
+ const std::vector<QuickenedInfo>& GetQuickenedInfo() const {
+ return quickened_info_;
+ }
+
+ // Returns the quickening info, or an empty array if it was not quickened.
+ // If already_quickened is true, then don't change anything but still return what the quicken
+ // data would have been.
+ std::vector<uint8_t> Compile();
+
+ const DexFile& GetDexFile() const;
+
+ // Compiles a RETURN-VOID into a RETURN-VOID-NO-BARRIER when the method does
+ // not require a constructor barrier.
+ void CompileReturnVoid(Instruction* inst, uint32_t dex_pc);
+
+ // Compiles a CHECK-CAST into 2 NOP instructions if it is known to be safe. In
+ // this case, returns the second NOP instruction pointer. Otherwise, returns
+ // the given "inst".
+ Instruction* CompileCheckCast(Instruction* inst, uint32_t dex_pc);
+
+ // Compiles a field access into a quick field access.
+ // The field index is replaced by an offset within an Object where we can read
+ // from / write to this field. Therefore, this does not involve any resolution
+ // at runtime.
+ // Since the field index is encoded with 16 bits, we can replace it only if the
+ // field offset can be encoded with 16 bits too.
+ void CompileInstanceFieldAccess(Instruction* inst, uint32_t dex_pc,
+ Instruction::Code new_opcode, bool is_put);
+
+ // Compiles a virtual method invocation into a quick virtual method invocation.
+ // The method index is replaced by the vtable index where the corresponding
+ // executable can be found. Therefore, this does not involve any resolution
+ // at runtime.
+ // Since the method index is encoded with 16 bits, we can replace it only if the
+ // vtable index can be encoded with 16 bits too.
+ void CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
+ Instruction::Code new_opcode, bool is_range);
+
+ // Return the next index.
+ uint16_t NextIndex();
+
+ // Returns the dequickened index if an instruction is quickened; otherwise returns the index.
+ uint16_t GetIndexForInstruction(const Instruction* inst, uint32_t index);
+
+ DexToDexCompiler* const compiler_;
+ CompilerDriver& driver_;
+ const DexCompilationUnit& unit_;
+ const CompilationLevel compilation_level_;
+
+ // Filled by the compiler when quickening, in order to encode that information
+ // in the .oat file. The runtime will use that information to get to the original
+ // opcodes.
+ std::vector<QuickenedInfo> quickened_info_;
+
+ // True if we optimized a return void to a return void no barrier.
+ bool optimized_return_void_ = false;
+
+ // If the code item was already quickened previously.
+ const bool already_quickened_;
+ const QuickenInfoTable existing_quicken_info_;
+ uint32_t quicken_index_ = 0u;
+
+ DISALLOW_COPY_AND_ASSIGN(CompilationState);
+};
+
DexToDexCompiler::DexToDexCompiler(CompilerDriver* driver)
: driver_(driver),
lock_("Quicken lock", kDexToDexCompilerLock) {
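
The comments on CompileInstanceFieldAccess and CompileInvokeVirtual above capture the central constraint of quickening: the rewritten instruction reuses the original 16-bit index slot, so the resolved field offset or vtable index must itself fit in 16 bits. A minimal sketch of that check (a hypothetical helper for illustration, not ART code):

    #include <cstdint>
    #include <optional>

    // A quickened instruction stores the resolved value in the same 16-bit slot
    // that held the dex index, so quickening is only legal when the resolved
    // field offset (or vtable index) itself fits in 16 bits.
    std::optional<uint16_t> QuickenedOperand(uint32_t resolved_value) {
      if (resolved_value > UINT16_MAX) {
        return std::nullopt;  // Too large; leave the instruction unquickened.
      }
      return static_cast<uint16_t>(resolved_value);
    }

    int main() {
      return QuickenedOperand(8).has_value() &&           // e.g. IGET -> IGET_QUICK.
             !QuickenedOperand(70000).has_value() ? 0 : 1;  // Left as IGET.
    }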
@@ -55,16 +134,13 @@ void DexToDexCompiler::ClearState() {
MutexLock lock(Thread::Current(), lock_);
active_dex_file_ = nullptr;
active_bit_vector_ = nullptr;
- seen_code_items_.clear();
should_quicken_.clear();
- shared_code_items_.clear();
- blacklisted_code_items_.clear();
shared_code_item_quicken_info_.clear();
}
-size_t DexToDexCompiler::NumUniqueCodeItems(Thread* self) const {
+size_t DexToDexCompiler::NumCodeItemsToQuicken(Thread* self) const {
MutexLock lock(self, lock_);
- return seen_code_items_.size();
+ return num_code_items_;
}
BitVector* DexToDexCompiler::GetOrAddBitVectorForDex(const DexFile* dex_file) {
@@ -80,17 +156,13 @@ BitVector* DexToDexCompiler::GetOrAddBitVectorForDex(const DexFile* dex_file) {
}
void DexToDexCompiler::MarkForCompilation(Thread* self,
- const MethodReference& method_ref,
- const DexFile::CodeItem* code_item) {
+ const MethodReference& method_ref) {
MutexLock lock(self, lock_);
BitVector* const bitmap = GetOrAddBitVectorForDex(method_ref.dex_file);
DCHECK(bitmap != nullptr);
DCHECK(!bitmap->IsBitSet(method_ref.index));
bitmap->SetBit(method_ref.index);
- // Detect the shared code items.
- if (!seen_code_items_.insert(code_item).second) {
- shared_code_items_.insert(code_item);
- }
+ ++num_code_items_;
}
DexToDexCompiler::CompilationState::CompilationState(DexToDexCompiler* compiler,
@@ -316,6 +388,7 @@ void DexToDexCompiler::CompilationState::CompileReturnVoid(Instruction* inst, ui
<< " at dex pc " << StringPrintf("0x%x", dex_pc) << " in method "
<< GetDexFile().PrettyMethod(unit_.GetDexMethodIndex(), true);
inst->SetOpcode(Instruction::RETURN_VOID_NO_BARRIER);
+ optimized_return_void_ = true;
}
Instruction* DexToDexCompiler::CompilationState::CompileCheckCast(Instruction* inst,
@@ -464,51 +537,48 @@ CompiledMethod* DexToDexCompiler::CompileMethod(
// If the code item is shared with multiple different method ids, make sure that we quicken only
// once and verify that all the dequicken maps match.
if (UNLIKELY(shared_code_items_.find(code_item) != shared_code_items_.end())) {
- // For shared code items, use a lock to prevent races.
- MutexLock mu(soa.Self(), lock_);
- // Blacklisted means there was a quickening conflict previously, bail early.
- if (blacklisted_code_items_.find(code_item) != blacklisted_code_items_.end()) {
+ // Avoid quickening the shared code items for now because the existing conflict detection logic
+ // does not currently handle cases where the code item is quickened in one place but
+ // compiled in another.
+ static constexpr bool kAvoidQuickeningSharedCodeItems = true;
+ if (kAvoidQuickeningSharedCodeItems) {
return nullptr;
}
+ // For shared code items, use a lock to prevent races.
+ MutexLock mu(soa.Self(), lock_);
auto existing = shared_code_item_quicken_info_.find(code_item);
- const bool already_quickened = existing != shared_code_item_quicken_info_.end();
+ QuickenState* existing_data = nullptr;
+ std::vector<uint8_t>* existing_quicken_data = nullptr;
+ if (existing != shared_code_item_quicken_info_.end()) {
+ existing_data = &existing->second;
+ if (existing_data->conflict_) {
+ return nullptr;
+ }
+ existing_quicken_data = &existing_data->quicken_data_;
+ }
+ bool optimized_return_void;
{
- CompilationState state(this,
- unit,
- compilation_level,
- already_quickened ? &existing->second.quicken_data_ : nullptr);
+ CompilationState state(this, unit, compilation_level, existing_quicken_data);
quicken_data = state.Compile();
+ optimized_return_void = state.optimized_return_void_;
}
// Already quickened, check that the data matches what was previously seen.
MethodReference method_ref(&dex_file, method_idx);
- if (already_quickened) {
- QuickenState* const existing_data = &existing->second;
- if (existing_data->quicken_data_ != quicken_data) {
- VLOG(compiler) << "Quicken data mismatch, dequickening method "
+ if (existing_data != nullptr) {
+ if (*existing_quicken_data != quicken_data ||
+ existing_data->optimized_return_void_ != optimized_return_void) {
+ VLOG(compiler) << "Quicken data mismatch for method "
<< dex_file.PrettyMethod(method_idx);
- // Unquicken using the existing quicken data.
- optimizer::ArtDecompileDEX(dex_file,
- *code_item,
- ArrayRef<const uint8_t>(existing_data->quicken_data_),
- /* decompile_return_instruction*/ false);
- // Go clear the vmaps for all the methods that were already quickened to avoid writing them
- // out during oat writing.
- for (const MethodReference& ref : existing_data->methods_) {
- CompiledMethod* method = driver_->GetCompiledMethod(ref);
- DCHECK(method != nullptr);
- method->ReleaseVMapTable();
- }
- // Blacklist the method to never attempt to quicken it in the future.
- blacklisted_code_items_.insert(code_item);
- shared_code_item_quicken_info_.erase(existing);
- return nullptr;
+ // Mark the method as a conflict to never attempt to quicken it in the future.
+ existing_data->conflict_ = true;
}
existing_data->methods_.push_back(method_ref);
} else {
QuickenState new_state;
new_state.methods_.push_back(method_ref);
new_state.quicken_data_ = quicken_data;
+ new_state.optimized_return_void_ = optimized_return_void;
bool inserted = shared_code_item_quicken_info_.emplace(code_item, new_state).second;
CHECK(inserted) << "Failed to insert " << dex_file.PrettyMethod(method_idx);
}
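
To summarize the bookkeeping in this hunk: the first method to compile a shared code item records its quicken data; every later method recomputes and compares, and any mismatch in the data or in the optimized-return-void flag marks the entry as a conflict to be unquickened in a separate pass. A simplified, self-contained sketch of that logic (hypothetical types, not the real QuickenState API):

    #include <cstdint>
    #include <map>
    #include <vector>

    // The first result for a shared code item is recorded; a later, different
    // result flags a conflict so the item is never quickened.
    struct QuickenState {
      std::vector<uint8_t> quicken_data;
      bool optimized_return_void = false;
      bool conflict = false;
    };

    void Record(std::map<const void*, QuickenState>& table,
                const void* code_item,
                const std::vector<uint8_t>& data,
                bool optimized_return_void) {
      auto it = table.find(code_item);
      if (it == table.end()) {
        table[code_item] = {data, optimized_return_void, false};
      } else if (it->second.quicken_data != data ||
                 it->second.optimized_return_void != optimized_return_void) {
        it->second.conflict = true;  // Unquicken in a later pass.
      }
    }

    int main() {
      std::map<const void*, QuickenState> table;
      int code_item = 0;  // Stand-in for a DexFile::CodeItem.
      Record(table, &code_item, {1, 2}, false);
      Record(table, &code_item, {1, 3}, false);  // Mismatch -> conflict.
      return table[&code_item].conflict ? 0 : 1;
    }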
@@ -556,9 +626,65 @@ CompiledMethod* DexToDexCompiler::CompileMethod(
ArrayRef<const uint8_t>(quicken_data), // vmap_table
ArrayRef<const uint8_t>(), // cfi data
ArrayRef<const linker::LinkerPatch>());
+ DCHECK(ret != nullptr);
return ret;
}
+void DexToDexCompiler::SetDexFiles(const std::vector<const DexFile*>& dex_files) {
+ // Record what code items are already seen to detect when multiple methods have the same code
+ // item.
+ std::unordered_set<const DexFile::CodeItem*> seen_code_items;
+ for (const DexFile* dex_file : dex_files) {
+ for (size_t i = 0; i < dex_file->NumClassDefs(); ++i) {
+ const DexFile::ClassDef& class_def = dex_file->GetClassDef(i);
+ const uint8_t* class_data = dex_file->GetClassData(class_def);
+ if (class_data == nullptr) {
+ continue;
+ }
+ ClassDataItemIterator it(*dex_file, class_data);
+ it.SkipAllFields();
+ for (; it.HasNextMethod(); it.Next()) {
+ const DexFile::CodeItem* code_item = it.GetMethodCodeItem();
+ // Detect the shared code items.
+ if (!seen_code_items.insert(code_item).second) {
+ shared_code_items_.insert(code_item);
+ }
+ }
+ }
+ }
+ VLOG(compiler) << "Shared code items " << shared_code_items_.size();
+}
+
+void DexToDexCompiler::UnquickenConflictingMethods() {
+ MutexLock mu(Thread::Current(), lock_);
+ size_t unquicken_count = 0;
+ for (const auto& pair : shared_code_item_quicken_info_) {
+ const DexFile::CodeItem* code_item = pair.first;
+ const QuickenState& state = pair.second;
+ CHECK_GE(state.methods_.size(), 1u);
+ if (state.conflict_) {
+ // Unquicken using the existing quicken data.
+ // TODO: Do we really need to pass a dex file in?
+ optimizer::ArtDecompileDEX(*state.methods_[0].dex_file,
+ *code_item,
+ ArrayRef<const uint8_t>(state.quicken_data_),
+ /* decompile_return_instruction*/ true);
+ ++unquicken_count;
+ // Go clear the vmaps for all the methods that were already quickened to avoid writing them
+ // out during oat writing.
+ for (const MethodReference& ref : state.methods_) {
+ CompiledMethod* method = driver_->RemoveCompiledMethod(ref);
+ if (method != nullptr) {
+ // There is up to one compiled method for each method ref. Releasing it leaves the
+ // deduped data intact, so it is safe to do even when other threads might be
+ // compiling.
+ CompiledMethod::ReleaseSwapAllocatedCompiledMethod(driver_, method);
+ }
+ }
+ }
+ }
+}
+
} // namespace optimizer
} // namespace art
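
SetDexFiles finds code items referenced by more than one method in a single pass over a seen-set: the first insert of a pointer succeeds, and any later insert of the same pointer fails and marks the item as shared. The idiom in isolation (a standalone sketch, not ART code):

    #include <iostream>
    #include <unordered_set>
    #include <vector>

    // unordered_set::insert() returns {iterator, false} when the key is already
    // present, so the second sighting of a code item pointer marks it shared.
    int main() {
      int item_a = 0, item_b = 0;  // Stand-ins for DexFile::CodeItem storage.
      std::vector<const int*> code_items = {&item_a, &item_b, &item_a};
      std::unordered_set<const int*> seen;
      std::unordered_set<const int*> shared;
      for (const int* item : code_items) {
        if (!seen.insert(item).second) {
          shared.insert(item);
        }
      }
      std::cout << "Shared code items " << shared.size() << std::endl;  // 1
      return 0;
    }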
diff --git a/compiler/dex/dex_to_dex_compiler.h b/compiler/dex/dex_to_dex_compiler.h
index 2105a9ded4..7df09f140c 100644
--- a/compiler/dex/dex_to_dex_compiler.h
+++ b/compiler/dex/dex_to_dex_compiler.h
@@ -59,99 +59,35 @@ class DexToDexCompiler {
const CompilationLevel compilation_level) WARN_UNUSED;
void MarkForCompilation(Thread* self,
- const MethodReference& method_ref,
- const DexFile::CodeItem* code_item);
+ const MethodReference& method_ref);
void ClearState();
+ // Unquicken all methods that have conflicting quicken info. This is not done during the
+ // quickening process to avoid race conditions.
+ void UnquickenConflictingMethods();
+
CompilerDriver* GetDriver() {
return driver_;
}
bool ShouldCompileMethod(const MethodReference& ref);
- size_t NumUniqueCodeItems(Thread* self) const;
+ // Return the number of code items to quicken.
+ size_t NumCodeItemsToQuicken(Thread* self) const;
+
+ void SetDexFiles(const std::vector<const DexFile*>& dex_files);
private:
// Holds the state for compiling a single method.
- struct CompilationState {
- struct QuickenedInfo {
- QuickenedInfo(uint32_t pc, uint16_t index) : dex_pc(pc), dex_member_index(index) {}
-
- uint32_t dex_pc;
- uint16_t dex_member_index;
- };
-
- CompilationState(DexToDexCompiler* compiler,
- const DexCompilationUnit& unit,
- const CompilationLevel compilation_level,
- const std::vector<uint8_t>* quicken_data);
-
- const std::vector<QuickenedInfo>& GetQuickenedInfo() const {
- return quickened_info_;
- }
-
- // Returns the quickening info, or an empty array if it was not quickened.
- // If already_quickened is true, then don't change anything but still return what the quicken
- // data would have been.
- std::vector<uint8_t> Compile();
-
- const DexFile& GetDexFile() const;
-
- // Compiles a RETURN-VOID into a RETURN-VOID-BARRIER within a constructor where
- // a barrier is required.
- void CompileReturnVoid(Instruction* inst, uint32_t dex_pc);
-
- // Compiles a CHECK-CAST into 2 NOP instructions if it is known to be safe. In
- // this case, returns the second NOP instruction pointer. Otherwise, returns
- // the given "inst".
- Instruction* CompileCheckCast(Instruction* inst, uint32_t dex_pc);
-
- // Compiles a field access into a quick field access.
- // The field index is replaced by an offset within an Object where we can read
- // from / write to this field. Therefore, this does not involve any resolution
- // at runtime.
- // Since the field index is encoded with 16 bits, we can replace it only if the
- // field offset can be encoded with 16 bits too.
- void CompileInstanceFieldAccess(Instruction* inst, uint32_t dex_pc,
- Instruction::Code new_opcode, bool is_put);
-
- // Compiles a virtual method invocation into a quick virtual method invocation.
- // The method index is replaced by the vtable index where the corresponding
- // executable can be found. Therefore, this does not involve any resolution
- // at runtime.
- // Since the method index is encoded with 16 bits, we can replace it only if the
- // vtable index can be encoded with 16 bits too.
- void CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc,
- Instruction::Code new_opcode, bool is_range);
-
- // Return the next index.
- uint16_t NextIndex();
-
- // Returns the dequickened index if an instruction is quickened, otherwise return index.
- uint16_t GetIndexForInstruction(const Instruction* inst, uint32_t index);
-
- DexToDexCompiler* const compiler_;
- CompilerDriver& driver_;
- const DexCompilationUnit& unit_;
- const CompilationLevel compilation_level_;
-
- // Filled by the compiler when quickening, in order to encode that information
- // in the .oat file. The runtime will use that information to get to the original
- // opcodes.
- std::vector<QuickenedInfo> quickened_info_;
-
- // If the code item was already quickened previously.
- const bool already_quickened_;
- const QuickenInfoTable existing_quicken_info_;
- uint32_t quicken_index_ = 0u;
-
- DISALLOW_COPY_AND_ASSIGN(CompilationState);
- };
+ struct CompilationState;
+ // Quicken state for a code item, may be referenced by multiple methods.
struct QuickenState {
std::vector<MethodReference> methods_;
std::vector<uint8_t> quicken_data_;
+ bool optimized_return_void_ = false;
+ bool conflict_ = false;
};
BitVector* GetOrAddBitVectorForDex(const DexFile* dex_file) REQUIRES(lock_);
@@ -166,15 +102,14 @@ class DexToDexCompiler {
mutable Mutex lock_;
// Record what method references are going to get quickened.
std::unordered_map<const DexFile*, BitVector> should_quicken_;
- // Record what code items are already seen to detect when multiple methods have the same code
- // item.
- std::unordered_set<const DexFile::CodeItem*> seen_code_items_ GUARDED_BY(lock_);
// Guarded by lock_ during writing, accessed without a lock during quickening.
// This is safe because no thread is adding to the shared code items during the quickening phase.
std::unordered_set<const DexFile::CodeItem*> shared_code_items_;
- std::unordered_set<const DexFile::CodeItem*> blacklisted_code_items_ GUARDED_BY(lock_);
+ // Blacklisted code items are unquickened in UnquickenConflictingMethods.
std::unordered_map<const DexFile::CodeItem*, QuickenState> shared_code_item_quicken_info_
GUARDED_BY(lock_);
+ // Number of added code items.
+ size_t num_code_items_ GUARDED_BY(lock_) = 0u;
};
std::ostream& operator<<(std::ostream& os, const DexToDexCompiler::CompilationLevel& rhs);
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index c617f54f55..fb428b8d9a 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -289,7 +289,6 @@ CompilerDriver::CompilerDriver(
compiled_method_storage_(swap_fd),
profile_compilation_info_(profile_compilation_info),
max_arena_alloc_(0),
- compiling_dex_to_dex_(false),
dex_to_dex_compiler_(this) {
DCHECK(compiler_options_ != nullptr);
@@ -450,99 +449,39 @@ static bool InstructionSetHasGenericJniStub(InstructionSet isa) {
}
}
-static void CompileMethod(Thread* self,
- CompilerDriver* driver,
- const DexFile::CodeItem* code_item,
- uint32_t access_flags,
- InvokeType invoke_type,
- uint16_t class_def_idx,
- uint32_t method_idx,
- Handle<mirror::ClassLoader> class_loader,
- const DexFile& dex_file,
- optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
- bool compilation_enabled,
- Handle<mirror::DexCache> dex_cache) {
+template <typename CompileFn>
+static void CompileMethodHarness(
+ Thread* self,
+ CompilerDriver* driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ Handle<mirror::ClassLoader> class_loader,
+ const DexFile& dex_file,
+ optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled,
+ Handle<mirror::DexCache> dex_cache,
+ CompileFn compile_fn) {
DCHECK(driver != nullptr);
- CompiledMethod* compiled_method = nullptr;
+ CompiledMethod* compiled_method;
uint64_t start_ns = kTimeCompileMethod ? NanoTime() : 0;
MethodReference method_ref(&dex_file, method_idx);
- if (driver->GetCompilingDexToDex()) {
- optimizer::DexToDexCompiler* const compiler = &driver->GetDexToDexCompiler();
- // This is the second pass when we dex-to-dex compile previously marked methods.
- // TODO: Refactor the compilation to avoid having to distinguish the two passes
- // here. That should be done on a higher level. http://b/29089975
- if (compiler->ShouldCompileMethod(method_ref)) {
- VerificationResults* results = driver->GetVerificationResults();
- DCHECK(results != nullptr);
- const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
- // Do not optimize if a VerifiedMethod is missing. SafeCast elision,
- // for example, relies on it.
- compiled_method = compiler->CompileMethod(
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file,
- (verified_method != nullptr)
- ? dex_to_dex_compilation_level
- : optimizer::DexToDexCompiler::CompilationLevel::kDontDexToDexCompile);
- }
- } else if ((access_flags & kAccNative) != 0) {
- // Are we extracting only and have support for generic JNI down calls?
- if (!driver->GetCompilerOptions().IsJniCompilationEnabled() &&
- InstructionSetHasGenericJniStub(driver->GetInstructionSet())) {
- // Leaving this empty will trigger the generic JNI version
- } else {
- // Query any JNI optimization annotations such as @FastNative or @CriticalNative.
- access_flags |= annotations::GetNativeMethodAnnotationAccessFlags(
- dex_file, dex_file.GetClassDef(class_def_idx), method_idx);
+ compiled_method = compile_fn(self,
+ driver,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_to_dex_compilation_level,
+ compilation_enabled,
+ dex_cache);
- compiled_method = driver->GetCompiler()->JniCompile(
- access_flags, method_idx, dex_file, dex_cache);
- CHECK(compiled_method != nullptr);
- }
- } else if ((access_flags & kAccAbstract) != 0) {
- // Abstract methods don't have code.
- } else {
- VerificationResults* results = driver->GetVerificationResults();
- DCHECK(results != nullptr);
- const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
- bool compile = compilation_enabled &&
- // Basic checks, e.g., not <clinit>.
- results->IsCandidateForCompilation(method_ref, access_flags) &&
- // Did not fail to create VerifiedMethod metadcata.
- verified_method != nullptr &&
- // Do not have failures that should punt to the interpreter.
- !verified_method->HasRuntimeThrow() &&
- (verified_method->GetEncounteredVerificationFailures() &
- (verifier::VERIFY_ERROR_FORCE_INTERPRETER | verifier::VERIFY_ERROR_LOCKING)) == 0 &&
- // Is eligable for compilation by methods-to-compile filter.
- driver->IsMethodToCompile(method_ref) &&
- driver->ShouldCompileBasedOnProfile(method_ref);
-
- if (compile) {
- // NOTE: if compiler declines to compile this method, it will return null.
- compiled_method = driver->GetCompiler()->Compile(code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- dex_file,
- dex_cache);
- }
- if (compiled_method == nullptr &&
- dex_to_dex_compilation_level !=
- optimizer::DexToDexCompiler::CompilationLevel::kDontDexToDexCompile) {
- DCHECK(!Runtime::Current()->UseJitCompilation());
- DCHECK(!driver->GetCompilingDexToDex());
- // TODO: add a command-line option to disable DEX-to-DEX compilation ?
- driver->GetDexToDexCompiler().MarkForCompilation(self, method_ref, code_item);
- }
- }
if (kTimeCompileMethod) {
uint64_t duration_ns = NanoTime() - start_ns;
if (duration_ns > MsToNs(driver->GetCompiler()->GetMaximumCompilationTimeBeforeWarning())) {
@@ -573,6 +512,170 @@ static void CompileMethod(Thread* self,
}
}
+static void CompileMethodDex2Dex(
+ Thread* self,
+ CompilerDriver* driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ Handle<mirror::ClassLoader> class_loader,
+ const DexFile& dex_file,
+ optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled,
+ Handle<mirror::DexCache> dex_cache) {
+ auto dex_2_dex_fn = [](Thread* self ATTRIBUTE_UNUSED,
+ CompilerDriver* driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ Handle<mirror::ClassLoader> class_loader,
+ const DexFile& dex_file,
+ optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled ATTRIBUTE_UNUSED,
+ Handle<mirror::DexCache> dex_cache ATTRIBUTE_UNUSED) -> CompiledMethod* {
+ DCHECK(driver != nullptr);
+ MethodReference method_ref(&dex_file, method_idx);
+
+ optimizer::DexToDexCompiler* const compiler = &driver->GetDexToDexCompiler();
+
+ if (compiler->ShouldCompileMethod(method_ref)) {
+ VerificationResults* results = driver->GetVerificationResults();
+ DCHECK(results != nullptr);
+ const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
+ // Do not optimize if a VerifiedMethod is missing. SafeCast elision,
+ // for example, relies on it.
+ return compiler->CompileMethod(
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file,
+ (verified_method != nullptr)
+ ? dex_to_dex_compilation_level
+ : optimizer::DexToDexCompiler::CompilationLevel::kDontDexToDexCompile);
+ }
+ return nullptr;
+ };
+ CompileMethodHarness(self,
+ driver,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_to_dex_compilation_level,
+ compilation_enabled,
+ dex_cache,
+ dex_2_dex_fn);
+}
+
+static void CompileMethodQuick(
+ Thread* self,
+ CompilerDriver* driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ Handle<mirror::ClassLoader> class_loader,
+ const DexFile& dex_file,
+ optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled,
+ Handle<mirror::DexCache> dex_cache) {
+ auto quick_fn = [](
+ Thread* self,
+ CompilerDriver* driver,
+ const DexFile::CodeItem* code_item,
+ uint32_t access_flags,
+ InvokeType invoke_type,
+ uint16_t class_def_idx,
+ uint32_t method_idx,
+ Handle<mirror::ClassLoader> class_loader,
+ const DexFile& dex_file,
+ optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level,
+ bool compilation_enabled,
+ Handle<mirror::DexCache> dex_cache) {
+ DCHECK(driver != nullptr);
+ CompiledMethod* compiled_method = nullptr;
+ MethodReference method_ref(&dex_file, method_idx);
+
+ if ((access_flags & kAccNative) != 0) {
+ // Are we extracting only and have support for generic JNI down calls?
+ if (!driver->GetCompilerOptions().IsJniCompilationEnabled() &&
+ InstructionSetHasGenericJniStub(driver->GetInstructionSet())) {
+ // Leaving this empty will trigger the generic JNI version
+ } else {
+ // Query any JNI optimization annotations such as @FastNative or @CriticalNative.
+ access_flags |= annotations::GetNativeMethodAnnotationAccessFlags(
+ dex_file, dex_file.GetClassDef(class_def_idx), method_idx);
+
+ compiled_method = driver->GetCompiler()->JniCompile(
+ access_flags, method_idx, dex_file, dex_cache);
+ CHECK(compiled_method != nullptr);
+ }
+ } else if ((access_flags & kAccAbstract) != 0) {
+ // Abstract methods don't have code.
+ } else {
+ VerificationResults* results = driver->GetVerificationResults();
+ DCHECK(results != nullptr);
+ const VerifiedMethod* verified_method = results->GetVerifiedMethod(method_ref);
+ bool compile = compilation_enabled &&
+ // Basic checks, e.g., not <clinit>.
+ results->IsCandidateForCompilation(method_ref, access_flags) &&
+ // Did not fail to create VerifiedMethod metadata.
+ verified_method != nullptr &&
+ // Do not have failures that should punt to the interpreter.
+ !verified_method->HasRuntimeThrow() &&
+ (verified_method->GetEncounteredVerificationFailures() &
+ (verifier::VERIFY_ERROR_FORCE_INTERPRETER | verifier::VERIFY_ERROR_LOCKING)) == 0 &&
+ // Is eligible for compilation by the methods-to-compile filter.
+ driver->IsMethodToCompile(method_ref) &&
+ driver->ShouldCompileBasedOnProfile(method_ref);
+
+ if (compile) {
+ // NOTE: if compiler declines to compile this method, it will return null.
+ compiled_method = driver->GetCompiler()->Compile(code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_cache);
+ }
+ if (compiled_method == nullptr &&
+ dex_to_dex_compilation_level !=
+ optimizer::DexToDexCompiler::CompilationLevel::kDontDexToDexCompile) {
+ DCHECK(!Runtime::Current()->UseJitCompilation());
+ // TODO: add a command-line option to disable DEX-to-DEX compilation ?
+ driver->GetDexToDexCompiler().MarkForCompilation(self, method_ref);
+ }
+ }
+ return compiled_method;
+ };
+ CompileMethodHarness(self,
+ driver,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_to_dex_compilation_level,
+ compilation_enabled,
+ dex_cache,
+ quick_fn);
+}
+
void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* timings) {
DCHECK(!Runtime::Current()->IsStarted());
jobject jclass_loader;
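
The refactoring above splits the old CompileMethod into CompileMethodQuick and CompileMethodDex2Dex, both funneled through CompileMethodHarness so that timing and bookkeeping live in one place while the per-pass work arrives as a callable. A condensed sketch of the pattern, with hypothetical and much simpler signatures than the real code:

    #include <chrono>
    #include <iostream>

    // Shared timing/bookkeeping wraps a pass-specific compile function supplied
    // as a template parameter, so the two passes differ only in the callable.
    template <typename CompileFn>
    void CompileWithHarness(const char* pass_name, CompileFn compile_fn) {
      auto start = std::chrono::steady_clock::now();
      bool compiled = compile_fn();  // Pass-specific work.
      auto us = std::chrono::duration_cast<std::chrono::microseconds>(
          std::chrono::steady_clock::now() - start).count();
      std::cout << pass_name << (compiled ? ": compiled in " : ": skipped after ")
                << us << "us" << std::endl;
    }

    int main() {
      CompileWithHarness("quick", [] { return true; });
      CompileWithHarness("dex2dex", [] { return false; });
      return 0;
    }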
@@ -614,37 +717,34 @@ void CompilerDriver::CompileOne(Thread* self, ArtMethod* method, TimingLogger* t
*dex_file,
dex_file->GetClassDef(class_def_idx));
- DCHECK(!compiling_dex_to_dex_);
- CompileMethod(self,
- this,
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- *dex_file,
- dex_to_dex_compilation_level,
- true,
- dex_cache);
-
- const size_t num_methods = dex_to_dex_compiler_.NumUniqueCodeItems(self);
+ CompileMethodQuick(self,
+ this,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ *dex_file,
+ dex_to_dex_compilation_level,
+ true,
+ dex_cache);
+
+ const size_t num_methods = dex_to_dex_compiler_.NumCodeItemsToQuicken(self);
if (num_methods != 0) {
DCHECK_EQ(num_methods, 1u);
- compiling_dex_to_dex_ = true;
- CompileMethod(self,
- this,
- code_item,
- access_flags,
- invoke_type,
- class_def_idx,
- method_idx,
- class_loader,
- *dex_file,
- dex_to_dex_compilation_level,
- true,
- dex_cache);
- compiling_dex_to_dex_ = false;
+ CompileMethodDex2Dex(self,
+ this,
+ code_item,
+ access_flags,
+ invoke_type,
+ class_def_idx,
+ method_idx,
+ class_loader,
+ *dex_file,
+ dex_to_dex_compilation_level,
+ true,
+ dex_cache);
dex_to_dex_compiler_.ClearState();
}
@@ -1444,13 +1544,19 @@ class ParallelCompilationManager {
void ForAll(size_t begin, size_t end, CompilationVisitor* visitor, size_t work_units)
REQUIRES(!*Locks::mutator_lock_) {
+ ForAllLambda(begin, end, [visitor](size_t index) { visitor->Visit(index); }, work_units);
+ }
+
+ template <typename Fn>
+ void ForAllLambda(size_t begin, size_t end, Fn fn, size_t work_units)
+ REQUIRES(!*Locks::mutator_lock_) {
Thread* self = Thread::Current();
self->AssertNoPendingException();
CHECK_GT(work_units, 0U);
index_.StoreRelaxed(begin);
for (size_t i = 0; i < work_units; ++i) {
- thread_pool_->AddTask(self, new ForAllClosure(this, end, visitor));
+ thread_pool_->AddTask(self, new ForAllClosureLambda<Fn>(this, end, fn));
}
thread_pool_->StartWorkers(self);
@@ -1470,32 +1576,33 @@ class ParallelCompilationManager {
}
private:
- class ForAllClosure : public Task {
+ template <typename Fn>
+ class ForAllClosureLambda : public Task {
public:
- ForAllClosure(ParallelCompilationManager* manager, size_t end, CompilationVisitor* visitor)
+ ForAllClosureLambda(ParallelCompilationManager* manager, size_t end, Fn fn)
: manager_(manager),
end_(end),
- visitor_(visitor) {}
+ fn_(fn) {}
- virtual void Run(Thread* self) {
+ void Run(Thread* self) OVERRIDE {
while (true) {
const size_t index = manager_->NextIndex();
if (UNLIKELY(index >= end_)) {
break;
}
- visitor_->Visit(index);
+ fn_(index);
self->AssertNoPendingException();
}
}
- virtual void Finalize() {
+ void Finalize() OVERRIDE {
delete this;
}
private:
ParallelCompilationManager* const manager_;
const size_t end_;
- CompilationVisitor* const visitor_;
+ Fn fn_;
};
AtomicInteger index_;
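
ForAllLambda gives every worker the same closure and lets each one claim indices from a shared atomic counter until it passes end, so work items are distributed dynamically rather than pre-partitioned. A self-contained sketch of that scheduling loop, using std::thread in place of ART's ThreadPool (an assumption for illustration):

    #include <atomic>
    #include <iostream>
    #include <thread>
    #include <vector>

    // Each worker pulls the next index from a shared atomic counter
    // (cf. NextIndex() above) until the counter reaches end.
    template <typename Fn>
    void ForAll(size_t begin, size_t end, Fn fn, size_t workers) {
      std::atomic<size_t> index(begin);
      std::vector<std::thread> pool;
      for (size_t i = 0; i < workers; ++i) {
        pool.emplace_back([&]() {
          while (true) {
            size_t my_index = index.fetch_add(1);
            if (my_index >= end) {
              break;
            }
            fn(my_index);
          }
        });
      }
      for (std::thread& t : pool) {
        t.join();
      }
    }

    int main() {
      std::atomic<size_t> sum(0);
      ForAll(0, 100, [&](size_t i) { sum += i; }, 4);
      std::cout << sum << std::endl;  // 4950
      return 0;
    }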
@@ -2574,64 +2681,33 @@ void CompilerDriver::InitializeClasses(jobject class_loader,
}
}
-void CompilerDriver::Compile(jobject class_loader,
- const std::vector<const DexFile*>& dex_files,
- TimingLogger* timings) {
- if (kDebugProfileGuidedCompilation) {
- LOG(INFO) << "[ProfileGuidedCompilation] " <<
- ((profile_compilation_info_ == nullptr)
- ? "null"
- : profile_compilation_info_->DumpInfo(&dex_files));
- }
-
- dex_to_dex_compiler_.ClearState();
- compiling_dex_to_dex_ = false;
- for (const DexFile* dex_file : dex_files) {
- CHECK(dex_file != nullptr);
- CompileDexFile(class_loader,
- *dex_file,
- dex_files,
- parallel_thread_pool_.get(),
- parallel_thread_count_,
- timings);
- const ArenaPool* const arena_pool = Runtime::Current()->GetArenaPool();
- const size_t arena_alloc = arena_pool->GetBytesAllocated();
- max_arena_alloc_ = std::max(arena_alloc, max_arena_alloc_);
- Runtime::Current()->ReclaimArenaPoolMemory();
- }
-
- if (dex_to_dex_compiler_.NumUniqueCodeItems(Thread::Current()) > 0u) {
- compiling_dex_to_dex_ = true;
- // TODO: Not visit all of the dex files, its probably rare that only one would have quickened
- // methods though.
- for (const DexFile* dex_file : dex_files) {
- CompileDexFile(class_loader,
- *dex_file,
- dex_files,
- parallel_thread_pool_.get(),
- parallel_thread_count_,
- timings);
- }
- dex_to_dex_compiler_.ClearState();
- compiling_dex_to_dex_ = false;
- }
-
- VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
-}
-
-class CompileClassVisitor : public CompilationVisitor {
- public:
- explicit CompileClassVisitor(const ParallelCompilationManager* manager) : manager_(manager) {}
+template <typename CompileFn>
+static void CompileDexFile(CompilerDriver* driver,
+ jobject class_loader,
+ const DexFile& dex_file,
+ const std::vector<const DexFile*>& dex_files,
+ ThreadPool* thread_pool,
+ size_t thread_count,
+ TimingLogger* timings,
+ const char* timing_name,
+ CompileFn compile_fn) {
+ TimingLogger::ScopedTiming t(timing_name, timings);
+ ParallelCompilationManager context(Runtime::Current()->GetClassLinker(),
+ class_loader,
+ driver,
+ &dex_file,
+ dex_files,
+ thread_pool);
- virtual void Visit(size_t class_def_index) REQUIRES(!Locks::mutator_lock_) OVERRIDE {
+ auto compile = [&context, &compile_fn](size_t class_def_index) {
ScopedTrace trace(__FUNCTION__);
- const DexFile& dex_file = *manager_->GetDexFile();
+ const DexFile& dex_file = *context.GetDexFile();
const DexFile::ClassDef& class_def = dex_file.GetClassDef(class_def_index);
- ClassLinker* class_linker = manager_->GetClassLinker();
- jobject jclass_loader = manager_->GetClassLoader();
+ ClassLinker* class_linker = context.GetClassLinker();
+ jobject jclass_loader = context.GetClassLoader();
ClassReference ref(&dex_file, class_def_index);
// Skip compiling classes with generic verifier failures since they will still fail at runtime
- if (manager_->GetCompiler()->verification_results_->IsClassRejected(ref)) {
+ if (context.GetCompiler()->GetVerificationResults()->IsClassRejected(ref)) {
return;
}
// Use a scoped object access to perform to the quick SkipClass check.
@@ -2662,7 +2738,7 @@ class CompileClassVisitor : public CompilationVisitor {
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(soa.Self(), kNative);
- CompilerDriver* const driver = manager_->GetCompiler();
+ CompilerDriver* const driver = context.GetCompiler();
// Can we run DEX-to-DEX compiler on this class ?
optimizer::DexToDexCompiler::CompilationLevel dex_to_dex_compilation_level =
@@ -2685,38 +2761,71 @@ class CompileClassVisitor : public CompilationVisitor {
continue;
}
previous_method_idx = method_idx;
- CompileMethod(soa.Self(),
- driver,
- it.GetMethodCodeItem(),
- it.GetMethodAccessFlags(),
- it.GetMethodInvokeType(class_def),
- class_def_index,
- method_idx,
- class_loader,
- dex_file,
- dex_to_dex_compilation_level,
- compilation_enabled,
- dex_cache);
+ compile_fn(soa.Self(),
+ driver,
+ it.GetMethodCodeItem(),
+ it.GetMethodAccessFlags(),
+ it.GetMethodInvokeType(class_def),
+ class_def_index,
+ method_idx,
+ class_loader,
+ dex_file,
+ dex_to_dex_compilation_level,
+ compilation_enabled,
+ dex_cache);
it.Next();
}
DCHECK(!it.HasNext());
+ };
+ context.ForAllLambda(0, dex_file.NumClassDefs(), compile, thread_count);
+}
+
+void CompilerDriver::Compile(jobject class_loader,
+ const std::vector<const DexFile*>& dex_files,
+ TimingLogger* timings) {
+ if (kDebugProfileGuidedCompilation) {
+ LOG(INFO) << "[ProfileGuidedCompilation] " <<
+ ((profile_compilation_info_ == nullptr)
+ ? "null"
+ : profile_compilation_info_->DumpInfo(&dex_files));
}
- private:
- const ParallelCompilationManager* const manager_;
-};
+ dex_to_dex_compiler_.ClearState();
+ for (const DexFile* dex_file : dex_files) {
+ CHECK(dex_file != nullptr);
+ CompileDexFile(this,
+ class_loader,
+ *dex_file,
+ dex_files,
+ parallel_thread_pool_.get(),
+ parallel_thread_count_,
+ timings,
+ "Compile Dex File Quick",
+ CompileMethodQuick);
+ const ArenaPool* const arena_pool = Runtime::Current()->GetArenaPool();
+ const size_t arena_alloc = arena_pool->GetBytesAllocated();
+ max_arena_alloc_ = std::max(arena_alloc, max_arena_alloc_);
+ Runtime::Current()->ReclaimArenaPoolMemory();
+ }
-void CompilerDriver::CompileDexFile(jobject class_loader,
- const DexFile& dex_file,
- const std::vector<const DexFile*>& dex_files,
- ThreadPool* thread_pool,
- size_t thread_count,
- TimingLogger* timings) {
- TimingLogger::ScopedTiming t("Compile Dex File", timings);
- ParallelCompilationManager context(Runtime::Current()->GetClassLinker(), class_loader, this,
- &dex_file, dex_files, thread_pool);
- CompileClassVisitor visitor(&context);
- context.ForAll(0, dex_file.NumClassDefs(), &visitor, thread_count);
+ if (dex_to_dex_compiler_.NumCodeItemsToQuicken(Thread::Current()) > 0u) {
+ // TODO: Avoid visiting all of the dex files; it's probably rare that only one would have
+ // quickened methods, though.
+ for (const DexFile* dex_file : dex_files) {
+ CompileDexFile(this,
+ class_loader,
+ *dex_file,
+ dex_files,
+ parallel_thread_pool_.get(),
+ parallel_thread_count_,
+ timings,
+ "Compile Dex File Dex2Dex",
+ CompileMethodDex2Dex);
+ }
+ dex_to_dex_compiler_.ClearState();
+ }
+
+ VLOG(compiler) << "Compile: " << GetMemoryUsageString(false);
}
void CompilerDriver::AddCompiledMethod(const MethodReference& method_ref,
@@ -2731,6 +2840,12 @@ void CompilerDriver::AddCompiledMethod(const MethodReference& method_ref,
DCHECK(GetCompiledMethod(method_ref) != nullptr) << method_ref.PrettyMethod();
}
+CompiledMethod* CompilerDriver::RemoveCompiledMethod(const MethodReference& method_ref) {
+ CompiledMethod* ret = nullptr;
+ CHECK(compiled_methods_.Remove(method_ref, &ret));
+ return ret;
+}
+
bool CompilerDriver::GetCompiledClass(const ClassReference& ref, ClassStatus* status) const {
DCHECK(status != nullptr);
// The table doesn't know if something wasn't inserted. For this case it will return
@@ -2904,6 +3019,7 @@ void CompilerDriver::FreeThreadPools() {
void CompilerDriver::SetDexFilesForOatFile(const std::vector<const DexFile*>& dex_files) {
dex_files_for_oat_file_ = dex_files;
compiled_classes_.AddDexFiles(dex_files);
+ dex_to_dex_compiler_.SetDexFiles(dex_files);
}
void CompilerDriver::SetClasspathDexFiles(const std::vector<const DexFile*>& dex_files) {
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h
index b51e0debbb..2b524a347d 100644
--- a/compiler/driver/compiler_driver.h
+++ b/compiler/driver/compiler_driver.h
@@ -165,6 +165,7 @@ class CompilerDriver {
void AddCompiledMethod(const MethodReference& method_ref,
CompiledMethod* const compiled_method,
size_t non_relative_linker_patch_count);
+ CompiledMethod* RemoveCompiledMethod(const MethodReference& method_ref);
void SetRequiresConstructorBarrier(Thread* self,
const DexFile* dex_file,
@@ -374,10 +375,6 @@ class CompilerDriver {
|| android::base::EndsWith(boot_image_filename, "core-optimizing.art");
}
- bool GetCompilingDexToDex() const {
- return compiling_dex_to_dex_;
- }
-
optimizer::DexToDexCompiler& GetDexToDexCompiler() {
return dex_to_dex_compiler_;
}
@@ -449,13 +446,6 @@ class CompilerDriver {
void Compile(jobject class_loader,
const std::vector<const DexFile*>& dex_files,
TimingLogger* timings);
- void CompileDexFile(jobject class_loader,
- const DexFile& dex_file,
- const std::vector<const DexFile*>& dex_files,
- ThreadPool* thread_pool,
- size_t thread_count,
- TimingLogger* timings)
- REQUIRES(!Locks::mutator_lock_);
bool MayInlineInternal(const DexFile* inlined_from, const DexFile* inlined_into) const;
@@ -541,7 +531,6 @@ class CompilerDriver {
size_t max_arena_alloc_;
// Compiler for dex to dex (quickening).
- bool compiling_dex_to_dex_;
optimizer::DexToDexCompiler dex_to_dex_compiler_;
friend class CompileClassVisitor;
diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc
index 2c62095458..17b94d3bdf 100644
--- a/compiler/jit/jit_compiler.cc
+++ b/compiler/jit/jit_compiler.cc
@@ -76,7 +76,7 @@ extern "C" void jit_types_loaded(void* handle, mirror::Class** types, size_t cou
const ArrayRef<mirror::Class*> types_array(types, count);
std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForClasses(
kRuntimeISA, jit_compiler->GetCompilerDriver()->GetInstructionSetFeatures(), types_array);
- MutexLock mu(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
CreateJITCodeEntry(std::move(elf_file));
}
}
diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h
index 1c875189c5..3145497091 100644
--- a/compiler/linker/elf_builder.h
+++ b/compiler/linker/elf_builder.h
@@ -18,7 +18,6 @@
#define ART_COMPILER_LINKER_ELF_BUILDER_H_
#include <vector>
-#include <unordered_map>
#include "arch/instruction_set.h"
#include "arch/mips/instruction_set_features_mips.h"
@@ -310,24 +309,27 @@ class ElfBuilder FINAL {
/* info */ 0,
align,
/* entsize */ 0),
- current_offset_(0) {
+ current_offset_(0),
+ last_offset_(0) {
}
Elf_Word Write(const std::string& name) {
if (current_offset_ == 0) {
DCHECK(name.empty());
+ } else if (name == last_name_) {
+ return last_offset_; // Very simple string de-duplication.
}
- auto res = written_names_.emplace(name, current_offset_);
- if (res.second) { // Inserted.
- this->WriteFully(name.c_str(), name.length() + 1);
- current_offset_ += name.length() + 1;
- }
- return res.first->second; // Offset.
+ last_name_ = name;
+ last_offset_ = current_offset_;
+ this->WriteFully(name.c_str(), name.length() + 1);
+ current_offset_ += name.length() + 1;
+ return last_offset_;
}
private:
Elf_Word current_offset_;
- std::unordered_map<std::string, Elf_Word> written_names_; // Dedup strings.
+ std::string last_name_;
+ Elf_Word last_offset_;
};
// Writer of .dynsym and .symtab sections.
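
The new Write above replaces the unordered_map with a one-entry cache: only a name identical to the immediately preceding one reuses its offset, which suffices because callers tend to write duplicate names back to back. A standalone sketch of the resulting behavior (offsets simplified; the real writer streams the bytes into the section):

    #include <cstdint>
    #include <iostream>
    #include <string>

    // Only an immediately repeated name reuses its previous offset, so the
    // sequence "a", "a", "b", "a" stores "a" twice.
    class LastNameDedupWriter {
     public:
      uint32_t Write(const std::string& name) {
        if (current_offset_ != 0 && name == last_name_) {
          return last_offset_;  // Very simple string de-duplication.
        }
        last_name_ = name;
        last_offset_ = current_offset_;
        current_offset_ += name.length() + 1;  // Models WriteFully(name + '\0').
        return last_offset_;
      }

     private:
      uint32_t current_offset_ = 1;  // Offset 0 holds the initial empty name,
      std::string last_name_;        // as in ELF string tables.
      uint32_t last_offset_ = 0;
    };

    int main() {
      LastNameDedupWriter w;
      std::cout << w.Write("a") << " " << w.Write("a") << " "
                << w.Write("b") << " " << w.Write("a") << std::endl;  // 1 1 3 5
      return 0;
    }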
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 704a0d3b87..6d49b32dbc 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -2498,8 +2498,23 @@ void CodeGeneratorARMVIXL::GenerateFrameEntry() {
}
if (!skip_overflow_check) {
+ // Using r4 instead of IP saves 2 bytes.
UseScratchRegisterScope temps(GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
+ vixl32::Register temp;
+ // TODO: Remove this check when R4 is made a callee-save register
+ // in ART compiled code (b/72801708). Currently we need to make
+ // sure r4 is not blocked, e.g. in special purpose
+ // TestCodeGeneratorARMVIXL; also asserting that r4 is available
+ // here.
+ if (!blocked_core_registers_[R4]) {
+ for (vixl32::Register reg : kParameterCoreRegistersVIXL) {
+ DCHECK(!reg.Is(r4));
+ }
+ DCHECK(!kCoreCalleeSaves.Includes(r4));
+ temp = r4;
+ } else {
+ temp = temps.Acquire();
+ }
__ Sub(temp, sp, Operand::From(GetStackOverflowReservedBytes(InstructionSet::kArm)));
// The load must immediately precede RecordPcInfo.
ExactAssemblyScope aas(GetVIXLAssembler(),
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 36c921986b..855da2b18f 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -395,7 +395,7 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
- if (!is_fatal_) {
+ if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) {
SaveLiveRegisters(codegen, locations);
}
@@ -3283,26 +3283,8 @@ static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
}
void LocationsBuilderMIPS::VisitCheckCast(HCheckCast* instruction) {
- LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
- bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
-
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- switch (type_check_kind) {
- case TypeCheckKind::kExactCheck:
- case TypeCheckKind::kAbstractClassCheck:
- case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind = (throws_into_catch || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
- break;
- case TypeCheckKind::kArrayCheck:
- case TypeCheckKind::kUnresolvedCheck:
- case TypeCheckKind::kInterfaceCheck:
- call_kind = LocationSummary::kCallOnSlowPath;
- break;
- }
-
+ LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
@@ -3331,18 +3313,7 @@ void InstructionCodeGeneratorMIPS::VisitCheckCast(HCheckCast* instruction) {
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
MipsLabel done;
- // Always false for read barriers since we may need to go to the entrypoint for non-fatal cases
- // from false negatives. The false negatives may come from avoiding read barriers below. Avoiding
- // read barriers is done for performance and code size reasons.
- bool is_type_check_slow_path_fatal = false;
- if (!kEmitCompilerReadBarrier) {
- is_type_check_slow_path_fatal =
- (type_check_kind == TypeCheckKind::kExactCheck ||
- type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
- !instruction->CanThrowIntoCatchBlock();
- }
+ bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
SlowPathCodeMIPS* slow_path =
new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS(
instruction, is_type_check_slow_path_fatal);
@@ -7213,11 +7184,12 @@ void LocationsBuilderMIPS::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind =
- kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
- baker_read_barrier_slow_path = kUseBakerReadBarrier;
+ case TypeCheckKind::kArrayObjectCheck: {
+ bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
+ call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+ baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
break;
+ }
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
@@ -7265,13 +7237,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
__ Sltiu(out, out, 1);
@@ -7279,13 +7253,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kAbstractClassCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
MipsLabel loop;
@@ -7295,7 +7271,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqz(out, &done);
__ Bne(out, cls, &loop);
@@ -7304,13 +7280,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kClassHierarchyCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Walk over the class hierarchy to find a match.
MipsLabel loop, success;
__ Bind(&loop);
@@ -7320,7 +7298,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
__ Bnez(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ B(&done);
@@ -7330,13 +7308,15 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kArrayObjectCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Do an exact check.
MipsLabel success;
__ Beq(out, cls, &success);
@@ -7346,7 +7326,7 @@ void InstructionCodeGeneratorMIPS::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
component_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqz(out, &done);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 6657582e2a..8a06061c6a 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -352,7 +352,7 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
- if (!is_fatal_) {
+ if (!is_fatal_ || instruction_->CanThrowIntoCatchBlock()) {
SaveLiveRegisters(codegen, locations);
}
@@ -2836,26 +2836,8 @@ static size_t NumberOfCheckCastTemps(TypeCheckKind type_check_kind) {
}
void LocationsBuilderMIPS64::VisitCheckCast(HCheckCast* instruction) {
- LocationSummary::CallKind call_kind = LocationSummary::kNoCall;
- bool throws_into_catch = instruction->CanThrowIntoCatchBlock();
-
TypeCheckKind type_check_kind = instruction->GetTypeCheckKind();
- switch (type_check_kind) {
- case TypeCheckKind::kExactCheck:
- case TypeCheckKind::kAbstractClassCheck:
- case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind = (throws_into_catch || kEmitCompilerReadBarrier)
- ? LocationSummary::kCallOnSlowPath
- : LocationSummary::kNoCall; // In fact, call on a fatal (non-returning) slow path.
- break;
- case TypeCheckKind::kArrayCheck:
- case TypeCheckKind::kUnresolvedCheck:
- case TypeCheckKind::kInterfaceCheck:
- call_kind = LocationSummary::kCallOnSlowPath;
- break;
- }
-
+ LocationSummary::CallKind call_kind = CodeGenerator::GetCheckCastCallKind(instruction);
LocationSummary* locations =
new (GetGraph()->GetAllocator()) LocationSummary(instruction, call_kind);
locations->SetInAt(0, Location::RequiresRegister());
@@ -2884,18 +2866,7 @@ void InstructionCodeGeneratorMIPS64::VisitCheckCast(HCheckCast* instruction) {
mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value();
Mips64Label done;
- // Always false for read barriers since we may need to go to the entrypoint for non-fatal cases
- // from false negatives. The false negatives may come from avoiding read barriers below. Avoiding
- // read barriers is done for performance and code size reasons.
- bool is_type_check_slow_path_fatal = false;
- if (!kEmitCompilerReadBarrier) {
- is_type_check_slow_path_fatal =
- (type_check_kind == TypeCheckKind::kExactCheck ||
- type_check_kind == TypeCheckKind::kAbstractClassCheck ||
- type_check_kind == TypeCheckKind::kClassHierarchyCheck ||
- type_check_kind == TypeCheckKind::kArrayObjectCheck) &&
- !instruction->CanThrowIntoCatchBlock();
- }
+ bool is_type_check_slow_path_fatal = CodeGenerator::IsTypeCheckSlowPathFatal(instruction);
SlowPathCodeMIPS64* slow_path =
new (codegen_->GetScopedAllocator()) TypeCheckSlowPathMIPS64(
instruction, is_type_check_slow_path_fatal);
@@ -5528,11 +5499,12 @@ void LocationsBuilderMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
case TypeCheckKind::kExactCheck:
case TypeCheckKind::kAbstractClassCheck:
case TypeCheckKind::kClassHierarchyCheck:
- case TypeCheckKind::kArrayObjectCheck:
- call_kind =
- kEmitCompilerReadBarrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
- baker_read_barrier_slow_path = kUseBakerReadBarrier;
+ case TypeCheckKind::kArrayObjectCheck: {
+ bool needs_read_barrier = CodeGenerator::InstanceOfNeedsReadBarrier(instruction);
+ call_kind = needs_read_barrier ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall;
+ baker_read_barrier_slow_path = kUseBakerReadBarrier && needs_read_barrier;
break;
+ }
case TypeCheckKind::kArrayCheck:
case TypeCheckKind::kUnresolvedCheck:
case TypeCheckKind::kInterfaceCheck:
@@ -5580,13 +5552,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
switch (type_check_kind) {
case TypeCheckKind::kExactCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Classes must be equal for the instanceof to succeed.
__ Xor(out, out, cls);
__ Sltiu(out, out, 1);
@@ -5594,13 +5568,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kAbstractClassCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If the class is abstract, we eagerly fetch the super class of the
// object to avoid doing a comparison we know will fail.
Mips64Label loop;
@@ -5610,7 +5586,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqzc(out, &done);
__ Bnec(out, cls, &loop);
@@ -5619,13 +5595,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kClassHierarchyCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Walk over the class hierarchy to find a match.
Mips64Label loop, success;
__ Bind(&loop);
@@ -5635,7 +5613,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
super_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
__ Bnezc(out, &loop);
// If `out` is null, we use it for the result, and jump to `done`.
__ Bc(&done);
@@ -5645,13 +5623,15 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
}
case TypeCheckKind::kArrayObjectCheck: {
+ ReadBarrierOption read_barrier_option =
+ CodeGenerator::ReadBarrierOptionForInstanceOf(instruction);
// /* HeapReference<Class> */ out = obj->klass_
GenerateReferenceLoadTwoRegisters(instruction,
out_loc,
obj_loc,
class_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// Do an exact check.
Mips64Label success;
__ Beqc(out, cls, &success);
@@ -5661,7 +5641,7 @@ void InstructionCodeGeneratorMIPS64::VisitInstanceOf(HInstanceOf* instruction) {
out_loc,
component_offset,
maybe_temp_loc,
- kCompilerReadBarrierOption);
+ read_barrier_option);
// If `out` is null, we use it for the result, and jump to `done`.
__ Beqzc(out, &done);
__ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset);
diff --git a/compiler/optimizing/dead_code_elimination.cc b/compiler/optimizing/dead_code_elimination.cc
index cca1055ac8..9fa0f72e80 100644
--- a/compiler/optimizing/dead_code_elimination.cc
+++ b/compiler/optimizing/dead_code_elimination.cc
@@ -146,6 +146,65 @@ static HConstant* Evaluate(HCondition* condition, HInstruction* left, HInstructi
}
}
+static bool RemoveNonNullControlDependences(HBasicBlock* block, HBasicBlock* throws) {
+ // Test for an if as the last statement.
+ if (!block->EndsWithIf()) {
+ return false;
+ }
+ HIf* ifs = block->GetLastInstruction()->AsIf();
+ // Find either:
+ // if obj == null
+ // throws
+ // else
+ // not_throws
+ // or:
+ // if obj != null
+ // not_throws
+ // else
+ // throws
+ HInstruction* cond = ifs->InputAt(0);
+ HBasicBlock* not_throws = nullptr;
+ if (throws == ifs->IfTrueSuccessor() && cond->IsEqual()) {
+ not_throws = ifs->IfFalseSuccessor();
+ } else if (throws == ifs->IfFalseSuccessor() && cond->IsNotEqual()) {
+ not_throws = ifs->IfTrueSuccessor();
+ } else {
+ return false;
+ }
+ DCHECK(cond->IsEqual() || cond->IsNotEqual());
+ HInstruction* obj = cond->InputAt(1);
+ if (obj->IsNullConstant()) {
+ obj = cond->InputAt(0);
+ } else if (!cond->InputAt(0)->IsNullConstant()) {
+ return false;
+ }
+ // Scan all uses of obj and find null check under control dependence.
+ HBoundType* bound = nullptr;
+ const HUseList<HInstruction*>& uses = obj->GetUses();
+ for (auto it = uses.begin(), end = uses.end(); it != end;) {
+ HInstruction* user = it->GetUser();
+ ++it; // increment before possibly replacing
+ if (user->IsNullCheck()) {
+ HBasicBlock* user_block = user->GetBlock();
+ if (user_block != block &&
+ user_block != throws &&
+ block->Dominates(user_block)) {
+ if (bound == nullptr) {
+ ReferenceTypeInfo ti = obj->GetReferenceTypeInfo();
+ bound = new (obj->GetBlock()->GetGraph()->GetAllocator()) HBoundType(obj);
+ bound->SetUpperBound(ti, /*can_be_null*/ false);
+ bound->SetReferenceTypeInfo(ti);
+ bound->SetCanBeNull(false);
+ not_throws->InsertInstructionBefore(bound, not_throws->GetFirstInstruction());
+ }
+ user->ReplaceWith(bound);
+ user_block->RemoveInstruction(user);
+ }
+ }
+ }
+ return bound != nullptr;
+}
+
// Simplify the pattern:
//
// B1
@@ -203,6 +262,11 @@ bool HDeadCodeElimination::SimplifyAlwaysThrows() {
block->ReplaceSuccessor(succ, exit);
rerun_dominance_and_loop_analysis = true;
MaybeRecordStat(stats_, MethodCompilationStat::kSimplifyThrowingInvoke);
+ // Perform a quick follow-up optimization on object != null control dependences
+ // that is much cheaper to perform now than in a later phase.
+ if (RemoveNonNullControlDependences(pred, block)) {
+ MaybeRecordStat(stats_, MethodCompilationStat::kRemovedNullCheck);
+ }
}
}
}
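
To make RemoveNonNullControlDependences concrete, a hedged illustration of the shape it rewrites (hypothetical input; C++ syntax standing in for the compiled Java):

    #include <stdexcept>

    struct Obj { int field; };

    [[noreturn]] static void AlwaysThrows() { throw std::runtime_error("boom"); }

    static int Use(Obj* obj) {
      if (obj == nullptr) {
        AlwaysThrows();  // the `throws` successor: never returns
      }
      // The `not_throws` successor dominates the load below, so `obj` is
      // provably non-null here. In HIR the pass pins that fact with an
      // HBoundType (can_be_null = false) and removes the dominated HNullCheck.
      return obj->field;
    }

    int main() { Obj o{42}; return Use(&o) == 42 ? 0 : 1; }
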
diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc
index 88326d321b..8b4eae1780 100644
--- a/compiler/optimizing/load_store_elimination.cc
+++ b/compiler/optimizing/load_store_elimination.cc
@@ -25,6 +25,51 @@
#include <iostream>
+/**
+ * The general algorithm of load-store elimination (LSE).
+ * Load-store analysis in the previous pass collects a list of heap locations
+ * and does alias analysis of those heap locations.
+ * LSE keeps track of a list of heap values corresponding to the heap
+ * locations. It visits basic blocks in reverse post order and for
+ * each basic block, visits instructions sequentially, and processes
+ * instructions as follows:
+ * - If the instruction is a load, and the heap location for that load has a
+ * valid heap value, the load can be eliminated. In order to maintain the
+ * validity of all heap locations during the optimization phase, the real
+ * elimination is delayed till the end of LSE.
+ * - If the instruction is a store, it updates the heap value for the heap
+ * location of the store with the store instruction. The real heap value
+ * can be fetched from the store instruction. Heap values are invalidated
+ * for heap locations that may alias with the store instruction's heap
+ * location. The store instruction can be eliminated unless the value stored
+ * is later needed e.g. by a load from the same/aliased heap location or
+ * the heap location persists at method return/deoptimization.
+ * The store instruction is also needed if it's not used to track the heap
+ * value anymore, e.g. when it fails to merge with the heap values from other
+ * predecessors.
+ * - A store that stores the same value as the heap value is eliminated.
+ * - The list of heap values is merged at basic block entry from the basic
+ *   block's predecessors. The algorithm is single-pass, so loop side effects are
+ *   used as a best effort to decide if a heap location is stored inside the loop.
+ * - A special type of object, called a singleton, is instantiated in the method
+ *   and has a single name, i.e. no aliases. Singletons have exclusive heap
+ * locations since they have no aliases. Singletons are helpful in narrowing
+ * down the life span of a heap location such that they do not always
+ * need to participate in merging heap values. Allocation of a singleton
+ * can be eliminated if that singleton is not used and does not persist
+ * at method return/deoptimization.
+ * - For newly instantiated instances, their heap values are initialized to
+ * language defined default values.
+ * - Some instructions such as invokes are treated as loading and invalidating
+ * all the heap values, depending on the instruction's side effects.
+ * - Finalizable objects are considered as persisting at method
+ * return/deoptimization.
+ * - Currently this LSE algorithm doesn't handle SIMD graphs, e.g. with VecLoad
+ * and VecStore instructions.
+ * - Currently this LSE algorithm doesn't handle graphs with try-catch, due to
+ * the special block merging structure.
+ */
+
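
A worked example may help here; a hedged before/after sketch of the transformation on a removable singleton (plain C++ standing in for the compiled code; names are hypothetical):

    #include <cstdio>

    struct Point { int x; };

    // Before LSE (conceptually): `p` is a singleton, it never escapes.
    static int BeforeLSE() {
      Point* p = new Point();  // allocation removable once all uses go away
      p->x = 1;                // dead store: overwritten before any load
      p->x = 2;                // store tracked as the heap value
      int v = p->x;            // load eliminated: value forwarded from the store
      delete p;
      return v;
    }

    // After LSE the loads, stores, and allocation all fold away.
    static int AfterLSE() { return 2; }

    int main() { std::printf("%d %d\n", BeforeLSE(), AfterLSE()); }
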
namespace art {
// An unknown heap value. Loads with such a value in the heap location cannot be eliminated.
@@ -59,8 +104,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
removed_loads_(allocator_.Adapter(kArenaAllocLSE)),
substitute_instructions_for_loads_(allocator_.Adapter(kArenaAllocLSE)),
possibly_removed_stores_(allocator_.Adapter(kArenaAllocLSE)),
- singleton_new_instances_(allocator_.Adapter(kArenaAllocLSE)),
- singleton_new_arrays_(allocator_.Adapter(kArenaAllocLSE)) {
+ singleton_new_instances_(allocator_.Adapter(kArenaAllocLSE)) {
}
void VisitBasicBlock(HBasicBlock* block) OVERRIDE {
@@ -88,19 +132,26 @@ class LSEVisitor : public HGraphDelegateVisitor {
return type_conversion;
}
- // Find an instruction's substitute if it should be removed.
+ // Find an instruction's substitute if it's a removed load.
// Return the same instruction if it should not be removed.
HInstruction* FindSubstitute(HInstruction* instruction) {
+ if (!IsLoad(instruction)) {
+ return instruction;
+ }
size_t size = removed_loads_.size();
for (size_t i = 0; i < size; i++) {
if (removed_loads_[i] == instruction) {
- return substitute_instructions_for_loads_[i];
+ HInstruction* substitute = substitute_instructions_for_loads_[i];
+ // The substitute list is a flat hierarchy.
+ DCHECK_EQ(FindSubstitute(substitute), substitute);
+ return substitute;
}
}
return instruction;
}
void AddRemovedLoad(HInstruction* load, HInstruction* heap_value) {
+ DCHECK(IsLoad(load));
DCHECK_EQ(FindSubstitute(heap_value), heap_value) <<
"Unexpected heap_value that has a substitute " << heap_value->DebugName();
removed_loads_.push_back(load);
@@ -207,28 +258,59 @@ class LSEVisitor : public HGraphDelegateVisitor {
new_instance->GetBlock()->RemoveInstruction(new_instance);
}
}
- for (HInstruction* new_array : singleton_new_arrays_) {
- size_t removed = HConstructorFence::RemoveConstructorFences(new_array);
- MaybeRecordStat(stats_,
- MethodCompilationStat::kConstructorFenceRemovedLSE,
- removed);
+ }
- if (!new_array->HasNonEnvironmentUses()) {
- new_array->RemoveEnvironmentUsers();
- new_array->GetBlock()->RemoveInstruction(new_array);
- }
+ private:
+ static bool IsLoad(HInstruction* instruction) {
+ if (instruction == kUnknownHeapValue || instruction == kDefaultHeapValue) {
+ return false;
}
+ // Unresolved load is not treated as a load.
+ return instruction->IsInstanceFieldGet() ||
+ instruction->IsStaticFieldGet() ||
+ instruction->IsArrayGet();
}
- private:
- // If heap_values[index] is an instance field store, need to keep the store.
- // This is necessary if a heap value is killed due to merging, or loop side
- // effects (which is essentially merging also), since a load later from the
- // location won't be eliminated.
+ static bool IsStore(HInstruction* instruction) {
+ if (instruction == kUnknownHeapValue || instruction == kDefaultHeapValue) {
+ return false;
+ }
+ // Unresolved store is not treated as a store.
+ return instruction->IsInstanceFieldSet() ||
+ instruction->IsArraySet() ||
+ instruction->IsStaticFieldSet();
+ }
+
+ // Returns the real heap value by finding its substitute or by "peeling"
+ // a store instruction.
+ HInstruction* GetRealHeapValue(HInstruction* heap_value) {
+ if (IsLoad(heap_value)) {
+ return FindSubstitute(heap_value);
+ }
+ if (!IsStore(heap_value)) {
+ return heap_value;
+ }
+
+ // We keep track of store instructions as the heap values which might be
+ // eliminated if the stores are later found not necessary. The real stored
+ // value needs to be fetched from the store instruction.
+ if (heap_value->IsInstanceFieldSet()) {
+ heap_value = heap_value->AsInstanceFieldSet()->GetValue();
+ } else if (heap_value->IsStaticFieldSet()) {
+ heap_value = heap_value->AsStaticFieldSet()->GetValue();
+ } else {
+ DCHECK(heap_value->IsArraySet());
+ heap_value = heap_value->AsArraySet()->GetValue();
+ }
+ // heap_value may already be a removed load.
+ return FindSubstitute(heap_value);
+ }
+
+ // If heap_value is a store, we need to keep the store.
+ // This is necessary when the heap value is killed or replaced by another value,
+ // since the store is then no longer used to track the heap value.
void KeepIfIsStore(HInstruction* heap_value) {
- if (heap_value == kDefaultHeapValue ||
- heap_value == kUnknownHeapValue ||
- !(heap_value->IsInstanceFieldSet() || heap_value->IsArraySet())) {
+ if (!IsStore(heap_value)) {
return;
}
auto idx = std::find(possibly_removed_stores_.begin(),
@@ -239,26 +321,41 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
}
+ // If a heap location X may alias with the heap location at `loc_index`,
+ // and the heap value of that location X holds a store, keep that store.
+ // It's needed for a dependent load that's not eliminated, since any store
+ // that may put a value into the load's heap location needs to be kept.
+ void KeepStoresIfAliasedToLocation(ScopedArenaVector<HInstruction*>& heap_values,
+ size_t loc_index) {
+ for (size_t i = 0; i < heap_values.size(); i++) {
+ if ((i == loc_index) || heap_location_collector_.MayAlias(i, loc_index)) {
+ KeepIfIsStore(heap_values[i]);
+ }
+ }
+ }
+
void HandleLoopSideEffects(HBasicBlock* block) {
DCHECK(block->IsLoopHeader());
int block_id = block->GetBlockId();
ScopedArenaVector<HInstruction*>& heap_values = heap_values_for_[block_id];
+ HBasicBlock* pre_header = block->GetLoopInformation()->GetPreHeader();
+ ScopedArenaVector<HInstruction*>& pre_header_heap_values =
+ heap_values_for_[pre_header->GetBlockId()];
- // Don't eliminate loads in irreducible loops. This is safe for singletons, because
- // they are always used by the non-eliminated loop-phi.
+ // Don't eliminate loads in irreducible loops.
+ // Also keep the stores before the loop.
if (block->GetLoopInformation()->IsIrreducible()) {
if (kIsDebugBuild) {
for (size_t i = 0; i < heap_values.size(); i++) {
DCHECK_EQ(heap_values[i], kUnknownHeapValue);
}
}
+ for (size_t i = 0; i < heap_values.size(); i++) {
+ KeepIfIsStore(pre_header_heap_values[i]);
+ }
return;
}
- HBasicBlock* pre_header = block->GetLoopInformation()->GetPreHeader();
- ScopedArenaVector<HInstruction*>& pre_header_heap_values =
- heap_values_for_[pre_header->GetBlockId()];
-
// Inherit the values from pre-header.
for (size_t i = 0; i < heap_values.size(); i++) {
heap_values[i] = pre_header_heap_values[i];
@@ -270,18 +367,17 @@ class LSEVisitor : public HGraphDelegateVisitor {
for (size_t i = 0; i < heap_values.size(); i++) {
HeapLocation* location = heap_location_collector_.GetHeapLocation(i);
ReferenceInfo* ref_info = location->GetReferenceInfo();
- if (ref_info->IsSingletonAndRemovable() &&
- !location->IsValueKilledByLoopSideEffects()) {
- // A removable singleton's field that's not stored into inside a loop is
+ if (ref_info->IsSingleton() && !location->IsValueKilledByLoopSideEffects()) {
+ // A singleton's field that's not stored into inside a loop is
// invariant throughout the loop. Nothing to do.
} else {
- // heap value is killed by loop side effects (stored into directly, or
- // due to aliasing). Or the heap value may be needed after method return
- // or deoptimization.
+ // heap value is killed by loop side effects.
KeepIfIsStore(pre_header_heap_values[i]);
heap_values[i] = kUnknownHeapValue;
}
}
+ } else {
+ // The loop doesn't kill any value.
}
}
@@ -300,45 +396,73 @@ class LSEVisitor : public HGraphDelegateVisitor {
ScopedArenaVector<HInstruction*>& heap_values = heap_values_for_[block->GetBlockId()];
for (size_t i = 0; i < heap_values.size(); i++) {
HInstruction* merged_value = nullptr;
+ // If we can merge the store itself from the predecessors, we keep
+ // the store as the heap value as long as possible. In case we cannot
+ // merge the store, we try to merge the values of the stores.
+ HInstruction* merged_store_value = nullptr;
// Whether merged_value is a result that's merged from all predecessors.
bool from_all_predecessors = true;
ReferenceInfo* ref_info = heap_location_collector_.GetHeapLocation(i)->GetReferenceInfo();
+ HInstruction* ref = ref_info->GetReference();
HInstruction* singleton_ref = nullptr;
if (ref_info->IsSingleton()) {
- // We do more analysis of liveness when merging heap values for such
- // cases since stores into such references may potentially be eliminated.
- singleton_ref = ref_info->GetReference();
+ // We do more analysis based on singleton's liveness when merging
+ // heap values for such cases.
+ singleton_ref = ref;
}
for (HBasicBlock* predecessor : predecessors) {
HInstruction* pred_value = heap_values_for_[predecessor->GetBlockId()][i];
+ if (!IsStore(pred_value)) {
+ pred_value = FindSubstitute(pred_value);
+ }
+ DCHECK(pred_value != nullptr);
+ HInstruction* pred_store_value = GetRealHeapValue(pred_value);
if ((singleton_ref != nullptr) &&
!singleton_ref->GetBlock()->Dominates(predecessor)) {
- // singleton_ref is not live in this predecessor. Skip this predecessor since
- // it does not really have the location.
+ // singleton_ref is not live in this predecessor. No need to merge
+ // since singleton_ref is not live at the beginning of this block.
DCHECK_EQ(pred_value, kUnknownHeapValue);
from_all_predecessors = false;
- continue;
+ break;
}
if (merged_value == nullptr) {
// First seen heap value.
+ DCHECK(pred_value != nullptr);
merged_value = pred_value;
} else if (pred_value != merged_value) {
// There are conflicting values.
merged_value = kUnknownHeapValue;
+ // We may still be able to merge store values.
+ }
+
+ // Conflicting stores may be storing the same value. We do another merge
+ // of real stored values.
+ if (merged_store_value == nullptr) {
+ // First seen store value.
+ DCHECK(pred_store_value != nullptr);
+ merged_store_value = pred_store_value;
+ } else if (pred_store_value != merged_store_value) {
+ // There are conflicting store values.
+ merged_store_value = kUnknownHeapValue;
+ // There must be conflicting stores also.
+ DCHECK_EQ(merged_value, kUnknownHeapValue);
+ // No need to merge anymore.
break;
}
}
- if (ref_info->IsSingleton()) {
- if (ref_info->IsSingletonAndNonRemovable() ||
- (merged_value == kUnknownHeapValue &&
- !block->IsSingleReturnOrReturnVoidAllowingPhis())) {
- // The heap value may be needed after method return or deoptimization,
- // or there are conflicting heap values from different predecessors and
- // this block is not a single return,
- // keep the last store in each predecessor since future loads may not
- // be eliminated.
+ if (merged_value == nullptr) {
+ DCHECK(!from_all_predecessors);
+ DCHECK(singleton_ref != nullptr);
+ }
+ if (from_all_predecessors) {
+ if (ref_info->IsSingletonAndRemovable() &&
+ block->IsSingleReturnOrReturnVoidAllowingPhis()) {
+ // Values in the singleton are not needed anymore.
+ } else if (!IsStore(merged_value)) {
+ // We don't track merged value as a store anymore. We have to
+ // hold the stores in predecessors live here.
for (HBasicBlock* predecessor : predecessors) {
ScopedArenaVector<HInstruction*>& pred_values =
heap_values_for_[predecessor->GetBlockId()];
@@ -346,18 +470,33 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
}
} else {
- // Currenctly we don't eliminate stores to non-singletons.
+ DCHECK(singleton_ref != nullptr);
+ // singleton_ref does not exist at the beginning of the block. There is
+ // no need to keep the stores.
}
- if ((merged_value == nullptr) || !from_all_predecessors) {
+ if (!from_all_predecessors) {
DCHECK(singleton_ref != nullptr);
DCHECK((singleton_ref->GetBlock() == block) ||
- !singleton_ref->GetBlock()->Dominates(block));
+ !singleton_ref->GetBlock()->Dominates(block))
+ << "method: " << GetGraph()->GetMethodName();
// singleton_ref is not defined before block or defined only in some of its
// predecessors, so block doesn't really have the location at its entry.
heap_values[i] = kUnknownHeapValue;
- } else {
+ } else if (predecessors.size() == 1) {
+ // Inherit heap value from the single predecessor.
+ DCHECK_EQ(heap_values_for_[predecessors[0]->GetBlockId()][i], merged_value);
heap_values[i] = merged_value;
+ } else {
+ DCHECK(merged_value == kUnknownHeapValue ||
+ merged_value == kDefaultHeapValue ||
+ merged_value->GetBlock()->Dominates(block));
+ if (merged_value != kUnknownHeapValue) {
+ heap_values[i] = merged_value;
+ } else {
+ // Stores in different predecessors may be storing the same value.
+ heap_values[i] = merged_store_value;
+ }
}
}
}
@@ -423,23 +562,12 @@ class LSEVisitor : public HGraphDelegateVisitor {
heap_values[idx] = constant;
return;
}
- if (heap_value != kUnknownHeapValue) {
- if (heap_value->IsInstanceFieldSet() || heap_value->IsArraySet()) {
- HInstruction* store = heap_value;
- // This load must be from a singleton since it's from the same
- // field/element that a "removed" store puts the value. That store
- // must be to a singleton's field/element.
- DCHECK(ref_info->IsSingleton());
- // Get the real heap value of the store.
- heap_value = heap_value->IsInstanceFieldSet() ? store->InputAt(1) : store->InputAt(2);
- // heap_value may already have a substitute.
- heap_value = FindSubstitute(heap_value);
- }
- }
+ heap_value = GetRealHeapValue(heap_value);
if (heap_value == kUnknownHeapValue) {
// Load isn't eliminated. Put the load as the value into the HeapLocation.
// This acts like GVN but with better aliasing analysis.
heap_values[idx] = instruction;
+ KeepStoresIfAliasedToLocation(heap_values, idx);
} else {
if (DataType::Kind(heap_value->GetType()) != DataType::Kind(instruction->GetType())) {
// The only situation where the same heap location has different type is when
@@ -452,6 +580,10 @@ class LSEVisitor : public HGraphDelegateVisitor {
DCHECK(heap_value->IsArrayGet()) << heap_value->DebugName();
DCHECK(instruction->IsArrayGet()) << instruction->DebugName();
}
+ // Load isn't eliminated. Put the load as the value into the HeapLocation.
+ // This acts like GVN but with better aliasing analysis.
+ heap_values[idx] = instruction;
+ KeepStoresIfAliasedToLocation(heap_values, idx);
return;
}
AddRemovedLoad(instruction, heap_value);
@@ -460,12 +592,21 @@ class LSEVisitor : public HGraphDelegateVisitor {
}
bool Equal(HInstruction* heap_value, HInstruction* value) {
+ DCHECK(!IsStore(value)) << value->DebugName();
+ if (heap_value == kUnknownHeapValue) {
+ // Don't compare kUnknownHeapValue with other values.
+ return false;
+ }
if (heap_value == value) {
return true;
}
if (heap_value == kDefaultHeapValue && GetDefaultValue(value->GetType()) == value) {
return true;
}
+ HInstruction* real_heap_value = GetRealHeapValue(heap_value);
+ if (real_heap_value != heap_value) {
+ return Equal(real_heap_value, value);
+ }
return false;
}
@@ -476,6 +617,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
size_t vector_length,
int16_t declaring_class_def_index,
HInstruction* value) {
+ DCHECK(!IsStore(value)) << value->DebugName();
// value may already have a substitute.
value = FindSubstitute(value);
HInstruction* original_ref = heap_location_collector_.HuntForOriginalReference(ref);
@@ -486,59 +628,47 @@ class LSEVisitor : public HGraphDelegateVisitor {
ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
HInstruction* heap_value = heap_values[idx];
- bool same_value = false;
bool possibly_redundant = false;
+
if (Equal(heap_value, value)) {
// Store into the heap location with the same value.
- same_value = true;
- } else if (index != nullptr &&
- heap_location_collector_.GetHeapLocation(idx)->HasAliasedLocations()) {
- // For array element, don't eliminate stores if the location can be aliased
- // (due to either ref or index aliasing).
- } else if (ref_info->IsSingleton()) {
- // Store into a field/element of a singleton. The value cannot be killed due to
- // aliasing/invocation. It can be redundant since future loads can
- // directly get the value set by this instruction. The value can still be killed due to
- // merging or loop side effects. Stores whose values are killed due to merging/loop side
- // effects later will be removed from possibly_removed_stores_ when that is detected.
- // Stores whose values may be needed after method return or deoptimization
- // are also removed from possibly_removed_stores_ when that is detected.
- possibly_redundant = true;
+ // This store can be eliminated right away.
+ instruction->GetBlock()->RemoveInstruction(instruction);
+ return;
+ } else {
HLoopInformation* loop_info = instruction->GetBlock()->GetLoopInformation();
- if (loop_info != nullptr) {
- // instruction is a store in the loop so the loop must does write.
+ if (loop_info == nullptr) {
+ // The store is not in a loop. We try to precisely track the heap value
+ // with the store.
+ possibly_redundant = true;
+ } else if (!loop_info->IsIrreducible()) {
+ // instruction is a store in the loop, so the loop must write.
DCHECK(side_effects_.GetLoopEffects(loop_info->GetHeader()).DoesAnyWrite());
-
- if (loop_info->IsDefinedOutOfTheLoop(original_ref)) {
- DCHECK(original_ref->GetBlock()->Dominates(loop_info->GetPreHeader()));
- // Keep the store since its value may be needed at the loop header.
- possibly_redundant = false;
- } else {
- // The singleton is created inside the loop. Value stored to it isn't needed at
+ if (ref_info->IsSingleton() && !loop_info->IsDefinedOutOfTheLoop(original_ref)) {
+ // original_ref is created inside the loop. Value stored to it isn't needed at
// the loop header. This is true for outer loops also.
+ possibly_redundant = true;
+ } else {
+ // Keep the store since its value may be needed at the loop header.
}
+ } else {
+ // Keep the store inside irreducible loops.
}
}
- if (same_value || possibly_redundant) {
+ if (possibly_redundant) {
possibly_removed_stores_.push_back(instruction);
}
- if (!same_value) {
- if (possibly_redundant) {
- DCHECK(instruction->IsInstanceFieldSet() || instruction->IsArraySet());
- // Put the store as the heap value. If the value is loaded from heap
- // by a load later, this store isn't really redundant.
- heap_values[idx] = instruction;
- } else {
- heap_values[idx] = value;
- }
- }
+ // Put the store as the heap value. If the value is loaded or needed after
+ // return/deoptimization later, this store isn't really redundant.
+ heap_values[idx] = instruction;
+
// This store may kill values in other heap locations due to aliasing.
for (size_t i = 0; i < heap_values.size(); i++) {
if (i == idx) {
continue;
}
- if (heap_values[i] == value) {
+ if (Equal(heap_values[i], value)) {
// Same value should be kept even if aliasing happens.
continue;
}
@@ -547,7 +677,9 @@ class LSEVisitor : public HGraphDelegateVisitor {
continue;
}
if (heap_location_collector_.MayAlias(i, idx)) {
- // Kill heap locations that may alias.
+ // Kill heap locations that may alias; as a result, if the heap value
+ // is a store, that store needs to be kept.
+ KeepIfIsStore(heap_values[i]);
heap_values[i] = kUnknownHeapValue;
}
}
@@ -633,24 +765,35 @@ class LSEVisitor : public HGraphDelegateVisitor {
const ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[instruction->GetBlock()->GetBlockId()];
for (HInstruction* heap_value : heap_values) {
- // Filter out fake instructions before checking instruction kind below.
- if (heap_value == kUnknownHeapValue || heap_value == kDefaultHeapValue) {
- continue;
- }
// A store is kept as the heap value for possibly removed stores.
- if (heap_value->IsInstanceFieldSet() || heap_value->IsArraySet()) {
- // Check whether the reference for a store is used by an environment local of
- // HDeoptimize.
+ // The stored value is generally observable after deoptimization, except
+ // for singletons that don't escape after deoptimization.
+ if (IsStore(heap_value)) {
+ if (heap_value->IsStaticFieldSet()) {
+ KeepIfIsStore(heap_value);
+ continue;
+ }
HInstruction* reference = heap_value->InputAt(0);
- DCHECK(heap_location_collector_.FindReferenceInfoOf(reference)->IsSingleton());
- for (const HUseListNode<HEnvironment*>& use : reference->GetEnvUses()) {
- HEnvironment* user = use.GetUser();
- if (user->GetHolder() == instruction) {
- // The singleton for the store is visible at this deoptimization
- // point. Need to keep the store so that the heap value is
- // seen by the interpreter.
+ if (heap_location_collector_.FindReferenceInfoOf(reference)->IsSingleton()) {
+ if (reference->IsNewInstance() && reference->AsNewInstance()->IsFinalizable()) {
+ // Finalizable objects always escape.
KeepIfIsStore(heap_value);
+ continue;
+ }
+ // Check whether the reference for a store is used by an environment local of
+ // HDeoptimize. If not, the singleton is not observed after
+ // deoptimization.
+ for (const HUseListNode<HEnvironment*>& use : reference->GetEnvUses()) {
+ HEnvironment* user = use.GetUser();
+ if (user->GetHolder() == instruction) {
+ // The singleton for the store is visible at this deoptimization
+ // point. Need to keep the store so that the heap value is
+ // seen by the interpreter.
+ KeepIfIsStore(heap_value);
+ }
}
+ } else {
+ KeepIfIsStore(heap_value);
}
}
}
@@ -691,9 +834,12 @@ class LSEVisitor : public HGraphDelegateVisitor {
// Singleton references cannot be seen by the callee.
} else {
if (side_effects.DoesAnyRead()) {
+ // Invocation may read the heap value.
KeepIfIsStore(heap_values[i]);
}
if (side_effects.DoesAnyWrite()) {
+ // Keep the store since it's not used to track the heap value anymore.
+ KeepIfIsStore(heap_values[i]);
heap_values[i] = kUnknownHeapValue;
}
}
@@ -758,7 +904,7 @@ class LSEVisitor : public HGraphDelegateVisitor {
return;
}
if (ref_info->IsSingletonAndRemovable()) {
- singleton_new_arrays_.push_back(new_array);
+ singleton_new_instances_.push_back(new_array);
}
ScopedArenaVector<HInstruction*>& heap_values =
heap_values_for_[new_array->GetBlock()->GetBlockId()];
@@ -791,7 +937,6 @@ class LSEVisitor : public HGraphDelegateVisitor {
ScopedArenaVector<HInstruction*> possibly_removed_stores_;
ScopedArenaVector<HInstruction*> singleton_new_instances_;
- ScopedArenaVector<HInstruction*> singleton_new_arrays_;
DISALLOW_COPY_AND_ASSIGN(LSEVisitor);
};
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 47ef194574..b3f23a0dcd 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -1410,7 +1410,7 @@ void OptimizingCompiler::GenerateJitDebugInfo(ArtMethod* method, debug::MethodDe
GetCompilerDriver()->GetInstructionSetFeatures(),
mini_debug_info,
ArrayRef<const debug::MethodDebugInfo>(&info, 1));
- MutexLock mu(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
JITCodeEntry* entry = CreateJITCodeEntry(elf_file);
IncrementJITCodeEntryRefcount(entry, info.code_address);
diff --git a/compiler/utils/atomic_dex_ref_map-inl.h b/compiler/utils/atomic_dex_ref_map-inl.h
index 203e484fb7..7023b9a0e8 100644
--- a/compiler/utils/atomic_dex_ref_map-inl.h
+++ b/compiler/utils/atomic_dex_ref_map-inl.h
@@ -75,6 +75,18 @@ inline bool AtomicDexRefMap<DexFileReferenceType, Value>::Get(const DexFileRefer
}
template <typename DexFileReferenceType, typename Value>
+inline bool AtomicDexRefMap<DexFileReferenceType, Value>::Remove(const DexFileReferenceType& ref,
+ Value* out) {
+ ElementArray* const array = GetArray(ref.dex_file);
+ if (array == nullptr) {
+ return false;
+ }
+ *out = (*array)[ref.index].LoadRelaxed();
+ (*array)[ref.index].StoreSequentiallyConsistent(nullptr);
+ return true;
+}
+
+template <typename DexFileReferenceType, typename Value>
inline void AtomicDexRefMap<DexFileReferenceType, Value>::AddDexFile(const DexFile* dex_file) {
arrays_.Put(dex_file, std::move(ElementArray(NumberOfDexIndices(dex_file))));
}
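
A hedged usage sketch of the new Remove; the MethodReference/CompiledMethod instantiation is an assumption about a typical caller, not something this hunk shows:

    // Hypothetical instantiation; Remove reads the old value, then nulls the slot.
    AtomicDexRefMap<MethodReference, CompiledMethod*> compiled_methods;
    compiled_methods.AddDexFile(dex_file);  // keys require the dex file first
    CompiledMethod* old_value = nullptr;
    if (compiled_methods.Remove(MethodReference(dex_file, method_idx), &old_value)) {
      // old_value held the previous entry; the caller now owns releasing it.
    }
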
diff --git a/compiler/utils/atomic_dex_ref_map.h b/compiler/utils/atomic_dex_ref_map.h
index 9ff506d6a4..3474e16b8d 100644
--- a/compiler/utils/atomic_dex_ref_map.h
+++ b/compiler/utils/atomic_dex_ref_map.h
@@ -45,6 +45,9 @@ class AtomicDexRefMap {
// Retrieve an item; returns false if the dex file is not added.
bool Get(const DexFileReferenceType& ref, Value* out) const;
+ // Remove an item and return the existing value. Returns false if the dex file is not added.
+ bool Remove(const DexFileReferenceType& ref, Value* out);
+
// Dex files must be added before method references belonging to them can be used as keys. Not
// thread safe.
void AddDexFile(const DexFile* dex_file);
diff --git a/dex2oat/linker/oat_writer.cc b/dex2oat/linker/oat_writer.cc
index d245cd1c63..0953e0813f 100644
--- a/dex2oat/linker/oat_writer.cc
+++ b/dex2oat/linker/oat_writer.cc
@@ -4364,12 +4364,15 @@ const uint8_t* OatWriter::LookupBootImageClassTableSlot(const DexFile& dex_file,
debug::DebugInfo OatWriter::GetDebugInfo() const {
debug::DebugInfo debug_info{};
debug_info.compiled_methods = ArrayRef<const debug::MethodDebugInfo>(method_info_);
- if (dex_files_ != nullptr) {
+ if (VdexWillContainDexFiles()) {
DCHECK_EQ(dex_files_->size(), oat_dex_files_.size());
for (size_t i = 0, size = dex_files_->size(); i != size; ++i) {
const DexFile* dex_file = (*dex_files_)[i];
const OatDexFile& oat_dex_file = oat_dex_files_[i];
- debug_info.dex_files.emplace(oat_dex_file.dex_file_offset_, dex_file);
+ uint32_t dex_file_offset = oat_dex_file.dex_file_offset_;
+ if (dex_file_offset != 0) {
+ debug_info.dex_files.emplace(dex_file_offset, dex_file);
+ }
}
}
return debug_info;
diff --git a/dex2oat/linker/oat_writer.h b/dex2oat/linker/oat_writer.h
index dfcaafc336..7edb032dd0 100644
--- a/dex2oat/linker/oat_writer.h
+++ b/dex2oat/linker/oat_writer.h
@@ -340,6 +340,10 @@ class OatWriter {
bool MayHaveCompiledMethods() const;
+ bool VdexWillContainDexFiles() const {
+ return dex_files_ != nullptr && !only_contains_uncompressed_zip_entries_;
+ }
+
// Find the address of the GcRoot<String> in the InternTable for a boot image string.
const uint8_t* LookupBootImageInternTableSlot(const DexFile& dex_file,
dex::StringIndex string_idx);
diff --git a/dexdump/dexdump.cc b/dexdump/dexdump.cc
index 01b28b55cf..8778b129c5 100644
--- a/dexdump/dexdump.cc
+++ b/dexdump/dexdump.cc
@@ -1176,14 +1176,20 @@ static void dumpBytecodes(const DexFile* pDexFile, u4 idx,
// Iterate over all instructions.
CodeItemDataAccessor accessor(*pDexFile, pCode);
+ const u4 maxPc = accessor.InsnsSizeInCodeUnits();
for (const DexInstructionPcPair& pair : accessor) {
+ const u4 dexPc = pair.DexPc();
+ if (dexPc >= maxPc) {
+ LOG(WARNING) << "GLITCH: run-away instruction at idx=0x" << std::hex << dexPc;
+ break;
+ }
const Instruction* instruction = &pair.Inst();
const u4 insnWidth = instruction->SizeInCodeUnits();
if (insnWidth == 0) {
- LOG(WARNING) << "GLITCH: zero-width instruction at idx=0x" << std::hex << pair.DexPc();
+ LOG(WARNING) << "GLITCH: zero-width instruction at idx=0x" << std::hex << dexPc;
break;
}
- dumpInstruction(pDexFile, pCode, codeOffset, pair.DexPc(), insnWidth, instruction);
+ dumpInstruction(pDexFile, pCode, codeOffset, dexPc, insnWidth, instruction);
} // for
}
diff --git a/dexlayout/compact_dex_writer.cc b/dexlayout/compact_dex_writer.cc
index 08438c4f4a..ca13f7588b 100644
--- a/dexlayout/compact_dex_writer.cc
+++ b/dexlayout/compact_dex_writer.cc
@@ -162,11 +162,18 @@ void CompactDexWriter::WriteCodeItem(Stream* stream,
static constexpr size_t kPayloadInstructionRequiredAlignment = 4;
const uint32_t current_code_item_start = stream->Tell() + preheader_bytes;
- if (!IsAlignedParam(current_code_item_start, kPayloadInstructionRequiredAlignment)) {
+ if (!IsAlignedParam(current_code_item_start, kPayloadInstructionRequiredAlignment) ||
+ kIsDebugBuild) {
// If the preheader is going to make the code unaligned, consider adding 2 bytes of padding
// before if required.
- for (const DexInstructionPcPair& instruction : code_item->Instructions()) {
- const Instruction::Code opcode = instruction->Opcode();
+ IterationRange<DexInstructionIterator> instructions = code_item->Instructions();
+ SafeDexInstructionIterator it(instructions.begin(), instructions.end());
+ for (; !it.IsErrorState() && it < instructions.end(); ++it) {
+ // In case the instruction goes past the end of the code item, make sure to not process it.
+ if (std::next(it).IsErrorState()) {
+ break;
+ }
+ const Instruction::Code opcode = it->Opcode();
// Payload instructions possibly require special alignment for their data.
if (opcode == Instruction::FILL_ARRAY_DATA ||
opcode == Instruction::PACKED_SWITCH ||
@@ -460,6 +467,11 @@ void CompactDexWriter::Write(DexContainer* output) {
// Rewrite the header with the calculated checksum.
WriteHeader(main_stream);
}
+
+ // Clear the dedupe state to prevent interdex code item deduping. This does not currently work well with
+ // dex2oat's class unloading. The issue is that verification encounters quickened opcodes after
+ // the first dex gets unloaded.
+ code_item_dedupe_->Clear();
}
std::unique_ptr<DexContainer> CompactDexWriter::CreateDexContainer() const {
diff --git a/dexlayout/compact_dex_writer.h b/dexlayout/compact_dex_writer.h
index 24d0fbf61d..ea9f7d13db 100644
--- a/dexlayout/compact_dex_writer.h
+++ b/dexlayout/compact_dex_writer.h
@@ -44,6 +44,11 @@ class CompactDexWriter : public DexWriter {
// Returns the offset of the deduplicated data, or kDidNotDedupe if deduplication did not occur.
uint32_t Dedupe(uint32_t data_start, uint32_t data_end, uint32_t item_offset);
+ // Clear dedupe state to prevent deduplication against existing items in the future.
+ void Clear() {
+ dedupe_map_.clear();
+ }
+
private:
class HashedMemoryRange {
public:
diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h
index d2f9cb9ce5..5635271dc1 100644
--- a/dexlayout/dexlayout.h
+++ b/dexlayout/dexlayout.h
@@ -66,8 +66,7 @@ class Options {
bool visualize_pattern_ = false;
bool update_checksum_ = false;
CompactDexLevel compact_dex_level_ = CompactDexLevel::kCompactDexLevelNone;
- // Disabled until dex2oat properly handles quickening of deduped code items.
- bool dedupe_code_items_ = false;
+ bool dedupe_code_items_ = true;
OutputFormat output_format_ = kOutputPlain;
const char* output_dex_directory_ = nullptr;
const char* output_file_name_ = nullptr;
diff --git a/dexlayout/dexlayout_test.cc b/dexlayout/dexlayout_test.cc
index be272fcf2c..bebdc202e7 100644
--- a/dexlayout/dexlayout_test.cc
+++ b/dexlayout/dexlayout_test.cc
@@ -221,6 +221,12 @@ static const char kDuplicateCodeItemInputDex[] =
"AHAAAAACAAAAAwAAAIwAAAADAAAAAQAAAJgAAAAFAAAABAAAAKQAAAAGAAAAAQAAAMQAAAABIAAA"
"AwAAAOQAAAACIAAABwAAACQBAAADIAAAAwAAAFYBAAAAIAAAAQAAAGUBAAAAEAAAAQAAAHgBAAA=";
+// Returns the default compact dex option for dexlayout based on kDefaultCompactDexLevel.
+static std::vector<std::string> DefaultCompactDexOption() {
+ return (kDefaultCompactDexLevel == CompactDexLevel::kCompactDexLevelFast) ?
+ std::vector<std::string>{"-x", "fast"} : std::vector<std::string>{"-x", "none"};
+}
+
static void WriteBase64ToFile(const char* base64, File* file) {
// Decode base64.
CHECK(base64 != nullptr);
@@ -289,7 +295,7 @@ class DexLayoutTest : public CommonRuntimeTest {
for (const std::string &dex_file : GetLibCoreDexFileNames()) {
std::vector<std::string> dexlayout_args =
{ "-w", tmp_dir, "-o", tmp_name, dex_file };
- if (!DexLayoutExec(dexlayout_args, error_msg)) {
+ if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
return false;
}
size_t dex_file_last_slash = dex_file.rfind('/');
@@ -304,12 +310,10 @@ class DexLayoutTest : public CommonRuntimeTest {
if (!::art::Exec(diff_exec_argv, error_msg)) {
return false;
}
- std::vector<std::string> rm_zip_exec_argv = { "/bin/rm", tmp_dir + "classes.dex" };
- if (!::art::Exec(rm_zip_exec_argv, error_msg)) {
+ if (!UnlinkFile(tmp_dir + "classes.dex")) {
return false;
}
- std::vector<std::string> rm_out_exec_argv = { "/bin/rm", tmp_dir + dex_file_name };
- if (!::art::Exec(rm_out_exec_argv, error_msg)) {
+ if (!UnlinkFile(tmp_dir + dex_file_name)) {
return false;
}
}
@@ -426,10 +430,7 @@ class DexLayoutTest : public CommonRuntimeTest {
}
// -v makes sure that the layout did not corrupt the dex file.
-
- std::vector<std::string> rm_exec_argv =
- { "/bin/rm", dex_file, profile_file, output_dex };
- if (!::art::Exec(rm_exec_argv, error_msg)) {
+ if (!UnlinkFile(dex_file) || !UnlinkFile(profile_file) || !UnlinkFile(output_dex)) {
return false;
}
return true;
@@ -467,7 +468,7 @@ class DexLayoutTest : public CommonRuntimeTest {
// -v makes sure that the layout did not corrupt the dex file.
std::vector<std::string> dexlayout_args =
{ "-i", "-v", "-w", tmp_dir, "-o", tmp_name, "-p", profile_file, dex_file };
- if (!DexLayoutExec(dexlayout_args, error_msg)) {
+ if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
return false;
}
@@ -479,7 +480,7 @@ class DexLayoutTest : public CommonRuntimeTest {
// -i since the checksum won't match from the first layout.
std::vector<std::string> second_dexlayout_args =
{ "-i", "-v", "-w", tmp_dir, "-o", tmp_name, "-p", profile_file, output_dex };
- if (!DexLayoutExec(second_dexlayout_args, error_msg)) {
+ if (!DexLayoutExec(second_dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
return false;
}
@@ -490,10 +491,11 @@ class DexLayoutTest : public CommonRuntimeTest {
diff_result = false;
}
- std::vector<std::string> rm_exec_argv =
- { "/bin/rm", dex_file, profile_file, output_dex, second_output_dex };
- if (!::art::Exec(rm_exec_argv, error_msg)) {
- return false;
+ std::vector<std::string> test_files = { dex_file, profile_file, output_dex, second_output_dex };
+ for (auto test_file : test_files) {
+ if (!UnlinkFile(test_file)) {
+ return false;
+ }
}
return diff_result;
@@ -512,7 +514,7 @@ class DexLayoutTest : public CommonRuntimeTest {
std::string output_dex = tmp_dir + "classes.dex.new";
std::vector<std::string> dexlayout_args = { "-w", tmp_dir, "-o", "/dev/null", input_dex };
- if (!DexLayoutExec(dexlayout_args, error_msg)) {
+ if (!DexLayoutExec(dexlayout_args, error_msg, /*pass_default_cdex_option*/ false)) {
return false;
}
@@ -522,9 +524,11 @@ class DexLayoutTest : public CommonRuntimeTest {
return false;
}
- std::vector<std::string> rm_exec_argv = { "/bin/rm", input_dex, output_dex };
- if (!::art::Exec(rm_exec_argv, error_msg)) {
- return false;
+ std::vector<std::string> dex_files = { input_dex, output_dex };
+ for (auto dex_file : dex_files) {
+ if (!UnlinkFile(dex_file)) {
+ return false;
+ }
}
return true;
}
@@ -550,17 +554,27 @@ class DexLayoutTest : public CommonRuntimeTest {
return true;
}
- bool DexLayoutExec(const std::vector<std::string>& dexlayout_args, std::string* error_msg) {
+ bool DexLayoutExec(const std::vector<std::string>& dexlayout_args,
+ std::string* error_msg,
+ bool pass_default_cdex_option = true) {
std::vector<std::string> argv;
std::string dexlayout = GetDexLayoutPath();
CHECK(OS::FileExists(dexlayout.c_str())) << dexlayout << " should be a valid file path";
argv.push_back(dexlayout);
+ if (pass_default_cdex_option) {
+ std::vector<std::string> cdex_level = DefaultCompactDexOption();
+ argv.insert(argv.end(), cdex_level.begin(), cdex_level.end());
+ }
argv.insert(argv.end(), dexlayout_args.begin(), dexlayout_args.end());
return ::art::Exec(argv, error_msg);
}
+
+ bool UnlinkFile(const std::string& file_path) {
+ return unix_file::FdFile(file_path, 0, false).Unlink();
+ }
};
@@ -730,11 +744,29 @@ TEST_F(DexLayoutTest, CodeItemOverrun) {
CHECK(mutated_successfully)
<< "Failed to find candidate code item with only one code unit in last instruction.";
});
- std::vector<std::string> dexlayout_args = { "-i", "-o", "/dev/null", temp_dex.GetFilename() };
+
+ std::string error_msg;
+
+ ScratchFile tmp_file;
+ const std::string& tmp_name = tmp_file.GetFilename();
+ size_t tmp_last_slash = tmp_name.rfind('/');
+ std::string tmp_dir = tmp_name.substr(0, tmp_last_slash + 1);
+ ScratchFile profile_file;
+
+ std::vector<std::string> dexlayout_args =
+ { "-i",
+ "-v",
+ "-w", tmp_dir,
+ "-o", tmp_name,
+ "-p", profile_file.GetFilename(),
+ temp_dex.GetFilename()
+ };
+ // -v makes sure that the layout did not corrupt the dex file.
ASSERT_TRUE(DexLayoutExec(&temp_dex,
/*dex_filename*/ nullptr,
- nullptr /* profile_file */,
+ &profile_file,
dexlayout_args));
+ ASSERT_TRUE(UnlinkFile(temp_dex.GetFilename() + ".new"));
}
// Test that link data is written out (or at least the header is updated).
@@ -772,11 +804,7 @@ TEST_F(DexLayoutTest, LinkData) {
/*dex_filename*/ nullptr,
&profile_file,
dexlayout_args));
-
- std::string output_dex = temp_dex.GetFilename() + ".new";
- std::vector<std::string> rm_exec_argv =
- { "/bin/rm", output_dex };
- ASSERT_TRUE(::art::Exec(rm_exec_argv, &error_msg));
+ ASSERT_TRUE(UnlinkFile(temp_dex.GetFilename() + ".new"));
}
TEST_F(DexLayoutTest, ClassFilter) {
diff --git a/openjdkjvmti/OpenjdkJvmTi.cc b/openjdkjvmti/OpenjdkJvmTi.cc
index 027635bbb5..a0c7f40b6f 100644
--- a/openjdkjvmti/OpenjdkJvmTi.cc
+++ b/openjdkjvmti/OpenjdkJvmTi.cc
@@ -1535,6 +1535,7 @@ extern "C" bool ArtPlugin_Initialize() {
MethodUtil::Register(&gEventHandler);
SearchUtil::Register();
HeapUtil::Register();
+ Transformer::Setup();
{
// Make sure we can deopt anything we need to.
diff --git a/openjdkjvmti/fixed_up_dex_file.cc b/openjdkjvmti/fixed_up_dex_file.cc
index a8d2a37fa6..e9522b3984 100644
--- a/openjdkjvmti/fixed_up_dex_file.cc
+++ b/openjdkjvmti/fixed_up_dex_file.cc
@@ -45,14 +45,13 @@
namespace openjdkjvmti {
-static void RecomputeDexChecksum(art::DexFile* dex_file)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
+static void RecomputeDexChecksum(art::DexFile* dex_file) {
reinterpret_cast<art::DexFile::Header*>(const_cast<uint8_t*>(dex_file->Begin()))->checksum_ =
dex_file->CalculateChecksum();
}
-static void DoDexUnquicken(const art::DexFile& new_dex_file, const art::DexFile& original_dex_file)
- REQUIRES_SHARED(art::Locks::mutator_lock_) {
+static void DoDexUnquicken(const art::DexFile& new_dex_file,
+ const art::DexFile& original_dex_file) {
const art::OatDexFile* oat_dex = original_dex_file.GetOatDexFile();
if (oat_dex == nullptr) {
return;
diff --git a/openjdkjvmti/fixed_up_dex_file.h b/openjdkjvmti/fixed_up_dex_file.h
index 7f05a2930a..594e8a7358 100644
--- a/openjdkjvmti/fixed_up_dex_file.h
+++ b/openjdkjvmti/fixed_up_dex_file.h
@@ -50,8 +50,7 @@ namespace openjdkjvmti {
class FixedUpDexFile {
public:
static std::unique_ptr<FixedUpDexFile> Create(const art::DexFile& original,
- const char* descriptor)
- REQUIRES_SHARED(art::Locks::mutator_lock_);
+ const char* descriptor);
const art::DexFile& GetDexFile() {
return *dex_file_;
diff --git a/openjdkjvmti/ti_class_definition.cc b/openjdkjvmti/ti_class_definition.cc
index 7f2f80009a..1b641cd905 100644
--- a/openjdkjvmti/ti_class_definition.cc
+++ b/openjdkjvmti/ti_class_definition.cc
@@ -45,6 +45,31 @@
namespace openjdkjvmti {
+void ArtClassDefinition::InitializeMemory() const {
+ DCHECK(art::MemMap::kCanReplaceMapping);
+ VLOG(signals) << "Initializing de-quickened memory for dex file of " << name_;
+ CHECK(dex_data_mmap_ != nullptr);
+ CHECK(temp_mmap_ != nullptr);
+ CHECK_EQ(dex_data_mmap_->GetProtect(), PROT_NONE);
+ CHECK_EQ(temp_mmap_->GetProtect(), PROT_READ | PROT_WRITE);
+
+ std::string desc = std::string("L") + name_ + ";";
+ std::unique_ptr<FixedUpDexFile>
+ fixed_dex_file(FixedUpDexFile::Create(*initial_dex_file_unquickened_, desc.c_str()));
+ CHECK(fixed_dex_file.get() != nullptr);
+ CHECK_LE(fixed_dex_file->Size(), temp_mmap_->Size());
+ CHECK_EQ(temp_mmap_->Size(), dex_data_mmap_->Size());
+ // Copy the data to the temp mmap.
+ memcpy(temp_mmap_->Begin(), fixed_dex_file->Begin(), fixed_dex_file->Size());
+
+ // Move the mmap atomically.
+ art::MemMap* source = temp_mmap_.release();
+ std::string error;
+ CHECK(dex_data_mmap_->ReplaceWith(&source, &error)) << "Failed to replace mmap for "
+ << name_ << " because " << error;
+ CHECK(dex_data_mmap_->Protect(PROT_READ));
+}
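
The PROT_NONE placeholder plus ReplaceWith swap is the heart of the lazy dequickening; a hedged POSIX sketch of the underlying mechanism, assuming MemMap::ReplaceWith is built on mremap as the kCanReplaceMapping guard suggests (error handling elided; mremap needs _GNU_SOURCE on glibc):

    #include <cstring>
    #include <sys/mman.h>

    // dest: the PROT_NONE placeholder mapping already handed out to readers.
    static void ReplaceMapping(void* dest, const void* data, size_t size) {
      void* src = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      memcpy(src, data, size);  // stage the fixed-up dex bytes
      // Splice src's pages over dest in one atomic step, then go read-only;
      // a reader that faulted on dest earlier lands in the runtime's fault
      // handler, which runs InitializeMemory() to perform this sequence.
      mremap(src, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, dest);
      mprotect(dest, size, PROT_READ);
    }
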
+
bool ArtClassDefinition::IsModified() const {
// RedefineClasses calls always are 'modified' since they need to change the current_dex_file of
// the class.
@@ -58,6 +83,27 @@ bool ArtClassDefinition::IsModified() const {
return false;
}
+ // The dex_data_ was never touched by the agents.
+ if (dex_data_mmap_ != nullptr && dex_data_mmap_->GetProtect() == PROT_NONE) {
+ if (current_dex_file_.data() == dex_data_mmap_->Begin()) {
+ // The dex_data_ looks like it changed (not equal to current_dex_file_) but we never
+ // initialized the dex_data_mmap_. This means the new_dex_data was filled in without looking
+ // at the initial dex_data_.
+ return true;
+ } else if (dex_data_.data() == dex_data_mmap_->Begin()) {
+ // The dex file used to have modifications but they were not added again.
+ return true;
+ } else {
+      // It's not clear what happened. It's possible that the agent got the current dex file data
+      // from some other source, so we need to initialize everything to see if it is the same.
+ VLOG(signals) << "Lazy dex file for " << name_ << " was never touched but the dex_data_ is "
+ << "changed! Need to initialize the memory to see if anything changed";
+ InitializeMemory();
+ }
+ }
+
+  // We can definitely read current_dex_file_ and dex_data_ without causing page faults.
+
// Check if the dex file we want to set is the same as the current one.
// Unfortunately we need to do this check even if no modifications have been done since it could
// be that agents were removed in the mean-time so we still have a different dex file. The dex
@@ -194,6 +240,53 @@ void ArtClassDefinition::InitWithDex(GetOriginalDexFile get_original,
const art::DexFile* quick_dex) {
art::Thread* self = art::Thread::Current();
DCHECK(quick_dex != nullptr);
+ if (art::MemMap::kCanReplaceMapping && kEnableOnDemandDexDequicken) {
+ size_t dequick_size = quick_dex->GetDequickenedSize();
+ std::string mmap_name("anon-mmap-for-redefine: ");
+ mmap_name += name_;
+ std::string error;
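+    // Create the dex data map PROT_NONE so that the first read of it faults; the
+    // TransformationFaultHandler (see transform.cc) then fills it in via InitializeMemory().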
+ dex_data_mmap_.reset(art::MemMap::MapAnonymous(mmap_name.c_str(),
+ nullptr,
+ dequick_size,
+ PROT_NONE,
+ /*low_4gb*/ false,
+ /*reuse*/ false,
+ &error));
+ mmap_name += "-TEMP";
+ temp_mmap_.reset(art::MemMap::MapAnonymous(mmap_name.c_str(),
+ nullptr,
+ dequick_size,
+ PROT_READ | PROT_WRITE,
+ /*low_4gb*/ false,
+ /*reuse*/ false,
+ &error));
+    if (LIKELY(dex_data_mmap_ != nullptr && temp_mmap_ != nullptr)) {
+ // Need to save the initial dexfile so we don't need to search for it in the fault-handler.
+ initial_dex_file_unquickened_ = quick_dex;
+ dex_data_ = art::ArrayRef<const unsigned char>(dex_data_mmap_->Begin(),
+ dex_data_mmap_->Size());
+ if (from_class_ext_) {
+        // We got the initial dex data from class_ext, so the current dex file must have already
+        // undergone redefinition and therefore has no cdex or quickening to undo.
+ // We can only do this if it's not a first load.
+ DCHECK(klass_ != nullptr);
+ const art::DexFile& cur_dex = self->DecodeJObject(klass_)->AsClass()->GetDexFile();
+ current_dex_file_ = art::ArrayRef<const unsigned char>(cur_dex.Begin(), cur_dex.Size());
+ } else {
+        // This class hasn't been redefined before. The dequickened current data is the same as
+        // the dex_data_mmap_ once it's filled in. We don't need to copy anything because the mmap
+        // will not be cleared until after everything is done.
+ current_dex_file_ = art::ArrayRef<const unsigned char>(dex_data_mmap_->Begin(),
+ dequick_size);
+ }
+ return;
+ }
+ }
+ dex_data_mmap_.reset(nullptr);
+ temp_mmap_.reset(nullptr);
+  // Either we failed to mmap a large enough area or on-demand dequickening was disabled. This is
+  // unfortunate, but since the size is currently just a guess anyway we might as well try to
+  // dequicken manually.
get_original(/*out*/&dex_data_memory_);
dex_data_ = art::ArrayRef<const unsigned char>(dex_data_memory_);
if (from_class_ext_) {
diff --git a/openjdkjvmti/ti_class_definition.h b/openjdkjvmti/ti_class_definition.h
index 00847394e7..31c3611e72 100644
--- a/openjdkjvmti/ti_class_definition.h
+++ b/openjdkjvmti/ti_class_definition.h
@@ -32,9 +32,14 @@
#ifndef ART_OPENJDKJVMTI_TI_CLASS_DEFINITION_H_
#define ART_OPENJDKJVMTI_TI_CLASS_DEFINITION_H_
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/types.h>
+
#include "art_jvmti.h"
#include "base/array_ref.h"
+#include "mem_map.h"
namespace openjdkjvmti {
@@ -43,13 +48,20 @@ namespace openjdkjvmti {
// redefinition/retransformation function that created it.
class ArtClassDefinition {
public:
+  // Whether we support doing on-demand dex dequickening using signal handlers.
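+  // When this is false (or MemMap::kCanReplaceMapping is false), InitWithDex falls back to
+  // dequickening the dex file eagerly.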
+ static constexpr bool kEnableOnDemandDexDequicken = true;
+
ArtClassDefinition()
: klass_(nullptr),
loader_(nullptr),
name_(),
protection_domain_(nullptr),
+ dex_data_mmap_(nullptr),
+ temp_mmap_(nullptr),
dex_data_memory_(),
+ initial_dex_file_unquickened_(nullptr),
dex_data_(),
+ current_dex_memory_(),
current_dex_file_(),
redefined_(false),
from_class_ext_(false),
@@ -87,6 +99,12 @@ class ArtClassDefinition {
}
}
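+  // Returns whether the given address lies inside the lazily initialized dex data mapping. The
+  // transformation fault handler uses this to map a faulting address back to its definition.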
+ bool ContainsAddress(uintptr_t ptr) const {
+ return dex_data_mmap_ != nullptr &&
+ reinterpret_cast<uintptr_t>(dex_data_mmap_->Begin()) <= ptr &&
+ reinterpret_cast<uintptr_t>(dex_data_mmap_->End()) > ptr;
+ }
+
bool IsModified() const REQUIRES_SHARED(art::Locks::mutator_lock_);
bool IsInitialized() const {
@@ -108,6 +126,13 @@ class ArtClassDefinition {
return name_;
}
+ bool IsLazyDefinition() const {
+ DCHECK(IsInitialized());
+ return dex_data_mmap_ != nullptr &&
+ dex_data_.data() == dex_data_mmap_->Begin() &&
+ dex_data_mmap_->GetProtect() == PROT_NONE;
+ }
+
jobject GetProtectionDomain() const {
DCHECK(IsInitialized());
return protection_domain_;
@@ -118,6 +143,8 @@ class ArtClassDefinition {
return dex_data_;
}
+ void InitializeMemory() const;
+
private:
jvmtiError InitCommon(art::Thread* self, jclass klass);
@@ -130,9 +157,17 @@ class ArtClassDefinition {
std::string name_;
jobject protection_domain_;
+  // Mmap that will be lazily filled with the original dex file if it needs to be de-quickened
+  // or de-compact-dex'd.
+ mutable std::unique_ptr<art::MemMap> dex_data_mmap_;
+ // This is a temporary mmap we will use to be able to fill the dex file data atomically.
+ mutable std::unique_ptr<art::MemMap> temp_mmap_;
+
// A unique_ptr to the current dex_data if it needs to be cleaned up.
std::vector<unsigned char> dex_data_memory_;
+ const art::DexFile* initial_dex_file_unquickened_;
+
// A ref to the current dex data. This is either dex_data_memory_, or current_dex_file_. This is
// what the dex file will be turned into.
art::ArrayRef<const unsigned char> dex_data_;
diff --git a/openjdkjvmti/transform.cc b/openjdkjvmti/transform.cc
index af838d62b9..dc9f69a96a 100644
--- a/openjdkjvmti/transform.cc
+++ b/openjdkjvmti/transform.cc
@@ -29,6 +29,9 @@
* questions.
*/
+#include <stddef.h>
+#include <sys/types.h>
+
#include <unordered_map>
#include <unordered_set>
@@ -41,6 +44,7 @@
#include "dex/dex_file_types.h"
#include "dex/utf.h"
#include "events-inl.h"
+#include "fault_handler.h"
#include "gc_root-inl.h"
#include "globals.h"
#include "jni_env_ext-inl.h"
@@ -63,6 +67,179 @@
namespace openjdkjvmti {
+// A FaultHandler that will deal with initializing ClassDefinitions when they are actually needed.
+class TransformationFaultHandler FINAL : public art::FaultHandler {
+ public:
+ explicit TransformationFaultHandler(art::FaultManager* manager)
+ : art::FaultHandler(manager),
+ uninitialized_class_definitions_lock_("JVMTI Initialized class definitions lock",
+ art::LockLevel::kSignalHandlingLock),
+ class_definition_initialized_cond_("JVMTI Initialized class definitions condition",
+ uninitialized_class_definitions_lock_) {
+ manager->AddHandler(this, /* generated_code */ false);
+ }
+
+ ~TransformationFaultHandler() {
+ art::MutexLock mu(art::Thread::Current(), uninitialized_class_definitions_lock_);
+ uninitialized_class_definitions_.clear();
+ }
+
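+  // Handles a SEGV caused by reading a still-PROT_NONE lazy dex data map. A definition moves
+  // through three states: uninitialized (map unreadable), initializing (some thread is running
+  // InitializeMemory), and initialized (map readable, so the faulting access can be retried).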
+ bool Action(int sig, siginfo_t* siginfo, void* context ATTRIBUTE_UNUSED) OVERRIDE {
+ DCHECK_EQ(sig, SIGSEGV);
+ art::Thread* self = art::Thread::Current();
+ if (UNLIKELY(uninitialized_class_definitions_lock_.IsExclusiveHeld(self))) {
+ if (self != nullptr) {
+ LOG(FATAL) << "Recursive call into Transformation fault handler!";
+ UNREACHABLE();
+ } else {
+ LOG(ERROR) << "Possible deadlock due to recursive signal delivery of segv.";
+ }
+ }
+ uintptr_t ptr = reinterpret_cast<uintptr_t>(siginfo->si_addr);
+ ArtClassDefinition* res = nullptr;
+
+ {
+      // NB: Technically, using a mutex and condition variables in a signal handler is not POSIX
+      // compliant, but everything should be fine since both the glibc and bionic implementations
+      // of mutexes and condition variables work so long as the thread was not interrupted during
+      // a lock/unlock (which it wasn't) on all architectures we care about.
+ art::MutexLock mu(self, uninitialized_class_definitions_lock_);
+ auto it = std::find_if(uninitialized_class_definitions_.begin(),
+ uninitialized_class_definitions_.end(),
+ [&](const auto op) { return op->ContainsAddress(ptr); });
+ if (it != uninitialized_class_definitions_.end()) {
+ res = *it;
+ // Remove the class definition.
+ uninitialized_class_definitions_.erase(it);
+ // Put it in the initializing list
+ initializing_class_definitions_.push_back(res);
+ } else {
+ // Wait for the ptr to be initialized (if it is currently initializing).
+ while (DefinitionIsInitializing(ptr)) {
+ WaitForClassInitializationToFinish();
+ }
+ // Return true (continue with user code) if we find that the definition has been
+ // initialized. Return false (continue on to next signal handler) if the definition is not
+ // initialized or found.
+ return std::find_if(initialized_class_definitions_.begin(),
+ initialized_class_definitions_.end(),
+ [&](const auto op) { return op->ContainsAddress(ptr); }) !=
+            initialized_class_definitions_.end();
+ }
+ }
+
+ if (LIKELY(self != nullptr)) {
+ CHECK_EQ(self->GetState(), art::ThreadState::kNative)
+ << "Transformation fault handler occurred outside of native mode";
+ }
+
+ VLOG(signals) << "Lazy initialization of dex file for transformation of " << res->GetName()
+ << " during SEGV";
+ res->InitializeMemory();
+
+ {
+ art::MutexLock mu(self, uninitialized_class_definitions_lock_);
+ // Move to initialized state and notify waiters.
+ initializing_class_definitions_.erase(std::find(initializing_class_definitions_.begin(),
+ initializing_class_definitions_.end(),
+ res));
+ initialized_class_definitions_.push_back(res);
+ class_definition_initialized_cond_.Broadcast(self);
+ }
+
+ return true;
+ }
+
+ void RemoveDefinition(ArtClassDefinition* def) REQUIRES(!uninitialized_class_definitions_lock_) {
+ art::MutexLock mu(art::Thread::Current(), uninitialized_class_definitions_lock_);
+ auto it = std::find(uninitialized_class_definitions_.begin(),
+ uninitialized_class_definitions_.end(),
+ def);
+ if (it != uninitialized_class_definitions_.end()) {
+ uninitialized_class_definitions_.erase(it);
+ return;
+ }
+ while (std::find(initializing_class_definitions_.begin(),
+ initializing_class_definitions_.end(),
+ def) != initializing_class_definitions_.end()) {
+ WaitForClassInitializationToFinish();
+ }
+ it = std::find(initialized_class_definitions_.begin(),
+ initialized_class_definitions_.end(),
+ def);
+ CHECK(it != initialized_class_definitions_.end()) << "Could not find class definition for "
+ << def->GetName();
+ initialized_class_definitions_.erase(it);
+ }
+
+ void AddArtDefinition(ArtClassDefinition* def) REQUIRES(!uninitialized_class_definitions_lock_) {
+ DCHECK(def->IsLazyDefinition());
+ art::MutexLock mu(art::Thread::Current(), uninitialized_class_definitions_lock_);
+ uninitialized_class_definitions_.push_back(def);
+ }
+
+ private:
+ bool DefinitionIsInitializing(uintptr_t ptr) REQUIRES(uninitialized_class_definitions_lock_) {
+ return std::find_if(initializing_class_definitions_.begin(),
+ initializing_class_definitions_.end(),
+ [&](const auto op) { return op->ContainsAddress(ptr); }) !=
+ initializing_class_definitions_.end();
+ }
+
+ void WaitForClassInitializationToFinish() REQUIRES(uninitialized_class_definitions_lock_) {
+ class_definition_initialized_cond_.Wait(art::Thread::Current());
+ }
+
+ art::Mutex uninitialized_class_definitions_lock_ ACQUIRED_BEFORE(art::Locks::abort_lock_);
+ art::ConditionVariable class_definition_initialized_cond_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+
+ // A list of the class definitions that have a non-readable map.
+ std::vector<ArtClassDefinition*> uninitialized_class_definitions_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+
+ // A list of class definitions that are currently undergoing unquickening. Threads should wait
+  // until the definition is no longer in this list before returning.
+ std::vector<ArtClassDefinition*> initializing_class_definitions_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+
+  // A list of class definitions that are already unquickened. Threads should return immediately
+  // if the definition they faulted on is in this list.
+ std::vector<ArtClassDefinition*> initialized_class_definitions_
+ GUARDED_BY(uninitialized_class_definitions_lock_);
+};
+
+static TransformationFaultHandler* gTransformFaultHandler = nullptr;
+
+void Transformer::Setup() {
+  // Although we create the fault handler here, it is actually owned by the 'art::fault_manager',
+  // which will take care of destroying it.
+ if (art::MemMap::kCanReplaceMapping && ArtClassDefinition::kEnableOnDemandDexDequicken) {
+ gTransformFaultHandler = new TransformationFaultHandler(&art::fault_manager);
+ }
+}
+
+// Simple helper to add and remove the class definition from the fault handler.
+class ScopedDefinitionHandler {
+ public:
+ explicit ScopedDefinitionHandler(ArtClassDefinition* def)
+ : def_(def), is_lazy_(def_->IsLazyDefinition()) {
+ if (is_lazy_) {
+ gTransformFaultHandler->AddArtDefinition(def_);
+ }
+ }
+
+ ~ScopedDefinitionHandler() {
+ if (is_lazy_) {
+ gTransformFaultHandler->RemoveDefinition(def_);
+ }
+ }
+
+ private:
+ ArtClassDefinition* def_;
+ bool is_lazy_;
+};
+
// Initialize templates.
template
void Transformer::TransformSingleClassDirect<ArtJvmtiEvent::kClassFileLoadHookNonRetransformable>(
@@ -78,6 +255,11 @@ void Transformer::TransformSingleClassDirect(EventHandler* event_handler,
static_assert(kEvent == ArtJvmtiEvent::kClassFileLoadHookNonRetransformable ||
kEvent == ArtJvmtiEvent::kClassFileLoadHookRetransformable,
"bad event type");
+  // We don't want to do thread state transitions between calling the event and setting the new
+  // data, so change to the native state early. This also avoids any problems the FaultHandler
+  // might have in determining whether an access to the dex_data comes from generated code.
+ art::ScopedThreadStateChange stsc(self, art::ThreadState::kNative);
+ ScopedDefinitionHandler handler(def);
jint new_len = -1;
unsigned char* new_data = nullptr;
art::ArrayRef<const unsigned char> dex_data = def->GetDexData();
diff --git a/openjdkjvmti/transform.h b/openjdkjvmti/transform.h
index f43af174f0..8bbeda4b09 100644
--- a/openjdkjvmti/transform.h
+++ b/openjdkjvmti/transform.h
@@ -48,6 +48,8 @@ jvmtiError GetClassLocation(ArtJvmTiEnv* env, jclass klass, /*out*/std::string*
class Transformer {
public:
+ static void Setup();
+
template<ArtJvmtiEvent kEvent>
static void TransformSingleClassDirect(
EventHandler* event_handler,
diff --git a/profman/profile_assistant.cc b/profman/profile_assistant.cc
index ff02b5d59f..a00b1fa5bd 100644
--- a/profman/profile_assistant.cc
+++ b/profman/profile_assistant.cc
@@ -31,12 +31,13 @@ static constexpr const uint32_t kMinNewClassesPercentChangeForCompilation = 2;
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
const std::vector<ScopedFlock>& profile_files,
- const ScopedFlock& reference_profile_file) {
+ const ScopedFlock& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
DCHECK(!profile_files.empty());
ProfileCompilationInfo info;
// Load the reference profile.
- if (!info.Load(reference_profile_file->Fd())) {
+ if (!info.Load(reference_profile_file->Fd(), /*merge_classes*/ true, filter_fn)) {
LOG(WARNING) << "Could not load reference profile file";
return kErrorBadProfiles;
}
@@ -48,7 +49,7 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfilesInternal(
// Merge all current profiles.
for (size_t i = 0; i < profile_files.size(); i++) {
ProfileCompilationInfo cur_info;
- if (!cur_info.Load(profile_files[i]->Fd())) {
+ if (!cur_info.Load(profile_files[i]->Fd(), /*merge_classes*/ true, filter_fn)) {
LOG(WARNING) << "Could not load profile file at index " << i;
return kErrorBadProfiles;
}
@@ -122,7 +123,8 @@ class ScopedFlockList {
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
const std::vector<int>& profile_files_fd,
- int reference_profile_file_fd) {
+ int reference_profile_file_fd,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
DCHECK_GE(reference_profile_file_fd, 0);
std::string error;
@@ -143,12 +145,15 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
return kErrorCannotLock;
}
- return ProcessProfilesInternal(profile_files.Get(), reference_profile_file);
+ return ProcessProfilesInternal(profile_files.Get(),
+ reference_profile_file,
+ filter_fn);
}
ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
const std::vector<std::string>& profile_files,
- const std::string& reference_profile_file) {
+ const std::string& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn) {
std::string error;
ScopedFlockList profile_files_list(profile_files.size());
@@ -164,7 +169,9 @@ ProfileAssistant::ProcessingResult ProfileAssistant::ProcessProfiles(
return kErrorCannotLock;
}
- return ProcessProfilesInternal(profile_files_list.Get(), locked_reference_profile_file);
+ return ProcessProfilesInternal(profile_files_list.Get(),
+ locked_reference_profile_file,
+ filter_fn);
}
} // namespace art
diff --git a/profman/profile_assistant.h b/profman/profile_assistant.h
index be703abda8..ee555840d7 100644
--- a/profman/profile_assistant.h
+++ b/profman/profile_assistant.h
@@ -53,16 +53,21 @@ class ProfileAssistant {
//
static ProcessingResult ProcessProfiles(
const std::vector<std::string>& profile_files,
- const std::string& reference_profile_file);
+ const std::string& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
+ = ProfileCompilationInfo::ProfileFilterFnAcceptAll);
static ProcessingResult ProcessProfiles(
const std::vector<int>& profile_files_fd_,
- int reference_profile_file_fd);
+ int reference_profile_file_fd,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn
+ = ProfileCompilationInfo::ProfileFilterFnAcceptAll);
private:
static ProcessingResult ProcessProfilesInternal(
const std::vector<ScopedFlock>& profile_files,
- const ScopedFlock& reference_profile_file);
+ const ScopedFlock& reference_profile_file,
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn);
DISALLOW_COPY_AND_ASSIGN(ProfileAssistant);
};
diff --git a/profman/profile_assistant_test.cc b/profman/profile_assistant_test.cc
index c75f3e9688..79310ac166 100644
--- a/profman/profile_assistant_test.cc
+++ b/profman/profile_assistant_test.cc
@@ -16,6 +16,7 @@
#include <gtest/gtest.h>
+#include "android-base/strings.h"
#include "art_method-inl.h"
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
@@ -51,6 +52,28 @@ class ProfileAssistantTest : public CommonRuntimeTest {
uint32_t dex_location_checksum1 = checksum;
std::string dex_location2 = "location2" + id;
uint32_t dex_location_checksum2 = 10 * checksum;
+ SetupProfile(dex_location1,
+ dex_location_checksum1,
+ dex_location2,
+ dex_location_checksum2,
+ number_of_methods,
+ number_of_classes,
+ profile,
+ info,
+ start_method_index,
+ reverse_dex_write_order);
+ }
+
+ void SetupProfile(const std::string& dex_location1,
+ uint32_t dex_location_checksum1,
+ const std::string& dex_location2,
+ uint32_t dex_location_checksum2,
+ uint16_t number_of_methods,
+ uint16_t number_of_classes,
+ const ScratchFile& profile,
+ ProfileCompilationInfo* info,
+ uint16_t start_method_index = 0,
+ bool reverse_dex_write_order = false) {
for (uint16_t i = start_method_index; i < start_method_index + number_of_methods; i++) {
// reverse_dex_write_order controls the order in which the dex files will be added to
// the profile and thus written to disk.
@@ -1128,4 +1151,89 @@ TEST_F(ProfileAssistantTest, DumpOnly) {
}
}
+TEST_F(ProfileAssistantTest, MergeProfilesWithFilter) {
+ ScratchFile profile1;
+ ScratchFile profile2;
+ ScratchFile reference_profile;
+
+ std::vector<int> profile_fds({
+ GetFd(profile1),
+ GetFd(profile2)});
+ int reference_profile_fd = GetFd(reference_profile);
+
+ // Use a real dex file to generate profile test data.
+ // The file will be used during merging to filter unwanted data.
+ std::vector<std::unique_ptr<const DexFile>> dex_files = OpenTestDexFiles("ProfileTestMultiDex");
+ const DexFile& d1 = *dex_files[0];
+ const DexFile& d2 = *dex_files[1];
+  // The new profile info will contain the methods with indices 0-99.
+ const uint16_t kNumberOfMethodsToEnableCompilation = 100;
+ ProfileCompilationInfo info1;
+ SetupProfile(d1.GetLocation(), d1.GetLocationChecksum(), "p1", 1,
+ kNumberOfMethodsToEnableCompilation, 0, profile1, &info1);
+ ProfileCompilationInfo info2;
+ SetupProfile(d2.GetLocation(), d2.GetLocationChecksum(), "p2", 2,
+ kNumberOfMethodsToEnableCompilation, 0, profile2, &info2);
+
+
+  // The reference profile info will contain the methods with indices 50-149.
+ const uint16_t kNumberOfMethodsAlreadyCompiled = 100;
+ ProfileCompilationInfo reference_info;
+ SetupProfile(d1.GetLocation(), d1.GetLocationChecksum(), "p1", 1,
+ kNumberOfMethodsAlreadyCompiled, 0, reference_profile,
+ &reference_info, kNumberOfMethodsToEnableCompilation / 2);
+
+ // Run profman and pass the dex file with --apk-fd.
+ android::base::unique_fd apk_fd(
+ open(GetTestDexFileName("ProfileTestMultiDex").c_str(), O_RDONLY));
+ ASSERT_GE(apk_fd.get(), 0);
+
+ std::string profman_cmd = GetProfmanCmd();
+ std::vector<std::string> argv_str;
+ argv_str.push_back(profman_cmd);
+ argv_str.push_back("--profile-file-fd=" + std::to_string(profile1.GetFd()));
+ argv_str.push_back("--profile-file-fd=" + std::to_string(profile2.GetFd()));
+ argv_str.push_back("--reference-profile-file-fd=" + std::to_string(reference_profile.GetFd()));
+ argv_str.push_back("--apk-fd=" + std::to_string(apk_fd.get()));
+ std::string error;
+
+ EXPECT_EQ(ExecAndReturnCode(argv_str, &error), 0) << error;
+
+ // Verify that we can load the result.
+
+ ProfileCompilationInfo result;
+ ASSERT_TRUE(reference_profile.GetFile()->ResetOffset());
+ ASSERT_TRUE(result.Load(reference_profile_fd));
+
+
+ ASSERT_TRUE(profile1.GetFile()->ResetOffset());
+ ASSERT_TRUE(profile2.GetFile()->ResetOffset());
+ ASSERT_TRUE(reference_profile.GetFile()->ResetOffset());
+
+ // Verify that the result filtered out data not belonging to the dex file.
+ // This is equivalent to checking that the result is equal to the merging of
+ // all profiles while filtering out data not belonging to the dex file.
+
+ ProfileCompilationInfo::ProfileLoadFilterFn filter_fn =
+ [&d1, &d2](const std::string& dex_location, uint32_t checksum) -> bool {
+ return (dex_location == ProfileCompilationInfo::GetProfileDexFileKey(d1.GetLocation())
+ && checksum == d1.GetLocationChecksum())
+ || (dex_location == ProfileCompilationInfo::GetProfileDexFileKey(d2.GetLocation())
+ && checksum == d2.GetLocationChecksum());
+ };
+
+ ProfileCompilationInfo info1_filter;
+ ProfileCompilationInfo info2_filter;
+ ProfileCompilationInfo expected;
+
+  info1_filter.Load(profile1.GetFd(), /*merge_classes*/ true, filter_fn);
+ info2_filter.Load(profile2.GetFd(), /*merge_classes*/ true, filter_fn);
+ expected.Load(reference_profile.GetFd(), /*merge_classes*/ true, filter_fn);
+
+ ASSERT_TRUE(expected.MergeWith(info1_filter));
+ ASSERT_TRUE(expected.MergeWith(info2_filter));
+
+ ASSERT_TRUE(expected.Equals(result));
+}
+
} // namespace art
diff --git a/profman/profman.cc b/profman/profman.cc
index ea6c382a81..387ce8dfae 100644
--- a/profman/profman.cc
+++ b/profman/profman.cc
@@ -18,6 +18,7 @@
#include <stdio.h>
#include <stdlib.h>
#include <sys/file.h>
+#include <sys/param.h>
#include <sys/stat.h>
#include <unistd.h>
@@ -297,6 +298,22 @@ class ProfMan FINAL {
}
}
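+
+  // Identifies a dex file by its profile key and checksum. Used to filter the profile data down
+  // to the dex files contained in the apks passed to profman.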
+ struct ProfileFilterKey {
+ ProfileFilterKey(const std::string& dex_location, uint32_t checksum)
+ : dex_location_(dex_location), checksum_(checksum) {}
+ const std::string dex_location_;
+ uint32_t checksum_;
+
+ bool operator==(const ProfileFilterKey& other) const {
+ return checksum_ == other.checksum_ && dex_location_ == other.dex_location_;
+ }
+ bool operator<(const ProfileFilterKey& other) const {
+ return checksum_ == other.checksum_
+ ? dex_location_ < other.dex_location_
+ : checksum_ < other.checksum_;
+ }
+ };
+
ProfileAssistant::ProcessingResult ProcessProfiles() {
// Validate that at least one profile file was passed, as well as a reference profile.
if (profile_files_.empty() && profile_files_fd_.empty()) {
@@ -310,6 +327,27 @@ class ProfMan FINAL {
Usage("Options --profile-file-fd and --reference-profile-file-fd "
"should only be used together");
}
+
+ // Check if we have any apks which we should use to filter the profile data.
+ std::set<ProfileFilterKey> profile_filter_keys;
+ if (!GetProfileFilterKeyFromApks(&profile_filter_keys)) {
+ return ProfileAssistant::kErrorIO;
+ }
+
+ // Build the profile filter function. If the set of keys is empty it means we
+ // don't have any apks; as such we do not filter anything.
+ const ProfileCompilationInfo::ProfileLoadFilterFn& filter_fn =
+ [profile_filter_keys](const std::string& dex_location, uint32_t checksum) {
+ if (profile_filter_keys.empty()) {
+ // No --apk was specified. Accept all dex files.
+ return true;
+ } else {
+ bool res = profile_filter_keys.find(
+ ProfileFilterKey(dex_location, checksum)) != profile_filter_keys.end();
+ return res;
+ }
+ };
+
ProfileAssistant::ProcessingResult result;
if (profile_files_.empty()) {
@@ -317,10 +355,13 @@ class ProfMan FINAL {
// so don't check the usage.
File file(reference_profile_file_fd_, false);
result = ProfileAssistant::ProcessProfiles(profile_files_fd_,
- reference_profile_file_fd_);
+ reference_profile_file_fd_,
+ filter_fn);
CloseAllFds(profile_files_fd_, "profile_files_fd_");
} else {
- result = ProfileAssistant::ProcessProfiles(profile_files_, reference_profile_file_);
+ result = ProfileAssistant::ProcessProfiles(profile_files_,
+ reference_profile_file_,
+ filter_fn);
}
return result;
}
@@ -329,18 +370,48 @@ class ProfMan FINAL {
return skip_apk_verification_;
}
- void OpenApkFilesFromLocations(std::vector<std::unique_ptr<const DexFile>>* dex_files) const {
+ bool GetProfileFilterKeyFromApks(std::set<ProfileFilterKey>* profile_filter_keys) {
+ auto process_fn = [profile_filter_keys](std::unique_ptr<const DexFile>&& dex_file) {
+ // Store the profile key of the location instead of the location itself.
+ // This will make the matching in the profile filter method much easier.
+ profile_filter_keys->emplace(ProfileCompilationInfo::GetProfileDexFileKey(
+ dex_file->GetLocation()), dex_file->GetLocationChecksum());
+ };
+ return OpenApkFilesFromLocations(process_fn);
+ }
+
+ bool OpenApkFilesFromLocations(std::vector<std::unique_ptr<const DexFile>>* dex_files) {
+ auto process_fn = [dex_files](std::unique_ptr<const DexFile>&& dex_file) {
+ dex_files->emplace_back(std::move(dex_file));
+ };
+ return OpenApkFilesFromLocations(process_fn);
+ }
+
+ bool OpenApkFilesFromLocations(
+ std::function<void(std::unique_ptr<const DexFile>&&)> process_fn) {
bool use_apk_fd_list = !apks_fd_.empty();
if (use_apk_fd_list) {
// Get the APKs from the collection of FDs.
- CHECK_EQ(dex_locations_.size(), apks_fd_.size());
+ if (dex_locations_.empty()) {
+        // Try to compute the dex locations from the file paths of the file descriptors.
+        // This makes it easier to invoke profman with --apk-fd without being
+        // forced to pass --dex-location when the location would be the apk path.
+ if (!ComputeDexLocationsFromApkFds()) {
+ return false;
+ }
+ } else {
+ if (dex_locations_.size() != apks_fd_.size()) {
+ Usage("The number of apk-fds must match the number of dex-locations.");
+ }
+ }
} else if (!apk_files_.empty()) {
- // Get the APKs from the collection of filenames.
- CHECK_EQ(dex_locations_.size(), apk_files_.size());
+ if (dex_locations_.size() != apk_files_.size()) {
+ Usage("The number of apk-fds must match the number of dex-locations.");
+ }
} else {
// No APKs were specified.
CHECK(dex_locations_.empty());
- return;
+ return true;
}
static constexpr bool kVerifyChecksum = true;
for (size_t i = 0; i < dex_locations_.size(); ++i) {
@@ -355,8 +426,8 @@ class ProfMan FINAL {
&error_msg,
&dex_files_for_location)) {
} else {
- LOG(WARNING) << "OpenZip failed for '" << dex_locations_[i] << "' " << error_msg;
- continue;
+ LOG(ERROR) << "OpenZip failed for '" << dex_locations_[i] << "' " << error_msg;
+ return false;
}
} else {
if (dex_file_loader.Open(apk_files_[i].c_str(),
@@ -366,14 +437,36 @@ class ProfMan FINAL {
&error_msg,
&dex_files_for_location)) {
} else {
- LOG(WARNING) << "Open failed for '" << dex_locations_[i] << "' " << error_msg;
- continue;
+ LOG(ERROR) << "Open failed for '" << dex_locations_[i] << "' " << error_msg;
+ return false;
}
}
for (std::unique_ptr<const DexFile>& dex_file : dex_files_for_location) {
- dex_files->emplace_back(std::move(dex_file));
+ process_fn(std::move(dex_file));
}
}
+ return true;
+ }
+
+ // Get the dex locations from the apk fds.
+  // The method reads the links from /proc/self/fd/ to find the original apk paths
+ // and puts them in the dex_locations_ vector.
+ bool ComputeDexLocationsFromApkFds() {
+    // We can't use a char array of PATH_MAX size without exceeding the frame size limit,
+    // so we use a vector as the buffer for the path instead.
+ std::vector<char> buffer(PATH_MAX, 0);
+ for (size_t i = 0; i < apks_fd_.size(); ++i) {
+ std::string fd_path = "/proc/self/fd/" + std::to_string(apks_fd_[i]);
+ ssize_t len = readlink(fd_path.c_str(), buffer.data(), buffer.size() - 1);
+ if (len == -1) {
+ PLOG(ERROR) << "Could not open path from fd";
+ return false;
+ }
+
+ buffer[len] = '\0';
+ dex_locations_.push_back(buffer.data());
+ }
+ return true;
}
std::unique_ptr<const ProfileCompilationInfo> LoadProfile(const std::string& filename, int fd) {
@@ -416,8 +509,6 @@ class ProfMan FINAL {
static const char* kOrdinaryProfile = "=== profile ===";
static const char* kReferenceProfile = "=== reference profile ===";
- // Open apk/zip files and and read dex files.
- MemMap::Init(); // for ZipArchive::OpenFromFd
std::vector<std::unique_ptr<const DexFile>> dex_files;
OpenApkFilesFromLocations(&dex_files);
std::string dump;
@@ -553,8 +644,7 @@ class ProfMan FINAL {
reference_profile_file_.empty() && !FdIsValid(reference_profile_file_fd_)) {
Usage("No profile files or reference profile specified.");
}
- // Open apk/zip files and and read dex files.
- MemMap::Init(); // for ZipArchive::OpenFromFd
+
// Open the dex files to get the names for classes.
std::vector<std::unique_ptr<const DexFile>> dex_files;
OpenApkFilesFromLocations(&dex_files);
@@ -948,8 +1038,6 @@ class ProfMan FINAL {
Usage("Profile must be specified with --reference-profile-file or "
"--reference-profile-file-fd");
}
- // for ZipArchive::OpenFromFd
- MemMap::Init();
// Open the profile output file if needed.
int fd = OpenReferenceProfile();
if (!FdIsValid(fd)) {
@@ -984,8 +1072,6 @@ class ProfMan FINAL {
}
int CreateBootProfile() {
- // Initialize memmap since it's required to open dex files.
- MemMap::Init();
// Open the profile output file.
const int reference_fd = OpenReferenceProfile();
if (!FdIsValid(reference_fd)) {
@@ -1065,8 +1151,6 @@ class ProfMan FINAL {
test_profile_class_percentage_,
test_profile_seed_);
} else {
- // Initialize MemMap for ZipArchive::OpenFromFd.
- MemMap::Init();
// Open the dex files to look up classes and methods.
std::vector<std::unique_ptr<const DexFile>> dex_files;
OpenApkFilesFromLocations(&dex_files);
@@ -1089,7 +1173,7 @@ class ProfMan FINAL {
return copy_and_update_profile_key_;
}
- bool CopyAndUpdateProfileKey() const {
+ bool CopyAndUpdateProfileKey() {
// Validate that at least one profile file was passed, as well as a reference profile.
if (!(profile_files_.size() == 1 ^ profile_files_fd_.size() == 1)) {
Usage("Only one profile file should be specified.");
@@ -1133,7 +1217,8 @@ class ProfMan FINAL {
static void CloseAllFds(const std::vector<int>& fds, const char* descriptor) {
for (size_t i = 0; i < fds.size(); i++) {
if (close(fds[i]) < 0) {
- PLOG(WARNING) << "Failed to close descriptor for " << descriptor << " at index " << i;
+ PLOG(WARNING) << "Failed to close descriptor for "
+ << descriptor << " at index " << i << ": " << fds[i];
}
}
}
@@ -1176,6 +1261,9 @@ static int profman(int argc, char** argv) {
// Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in UsageError.
profman.ParseArgs(argc, argv);
+ // Initialize MemMap for ZipArchive::OpenFromFd.
+ MemMap::Init();
+
if (profman.ShouldGenerateTestProfile()) {
return profman.GenerateTestProfile();
}
diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S
index 63f4f6cb8c..58e0e44813 100644
--- a/runtime/arch/mips64/quick_entrypoints_mips64.S
+++ b/runtime/arch/mips64/quick_entrypoints_mips64.S
@@ -2207,8 +2207,9 @@ ENTRY art_quick_instrumentation_entry
# Deliver exception if we got nullptr as function.
move $t9, $v0 # $t9 holds reference to code
ld $a0, 8($sp) # Restore arg0.
+ dla $v0, art_quick_instrumentation_exit
RESTORE_SAVE_REFS_AND_ARGS_FRAME
- dla $ra, art_quick_instrumentation_exit
+ move $ra, $v0
jic $t9, 0 # call method, returning to art_quick_instrumentation_exit
.Ldeliver_instrumentation_entry_exception:
RESTORE_SAVE_REFS_AND_ARGS_FRAME
diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc
index bd51809c22..4be4b12611 100644
--- a/runtime/arch/stub_test.cc
+++ b/runtime/arch/stub_test.cc
@@ -186,10 +186,9 @@ class StubTest : public CommonRuntimeTest {
"stp x2, x3, [sp, #16]\n\t"
"stp x4, x5, [sp, #32]\n\t"
"stp x6, x7, [sp, #48]\n\t"
- // To be extra defensive, store x20. We do this because some of the stubs might make a
+        // To be extra defensive, store x20, x21. We do this because some of the stubs might make a
// transition into the runtime via the blr instruction below and *not* save x20.
- "str x20, [sp, #64]\n\t"
- // 8 byte buffer
+ "stp x20, x21, [sp, #64]\n\t"
"sub sp, sp, #16\n\t" // Reserve stack space, 16B aligned
".cfi_adjust_cfa_offset 16\n\t"
@@ -288,7 +287,7 @@ class StubTest : public CommonRuntimeTest {
"ldp x2, x3, [sp, #16]\n\t"
"ldp x4, x5, [sp, #32]\n\t"
"ldp x6, x7, [sp, #48]\n\t"
- "ldr x20, [sp, #64]\n\t"
+ "ldp x20, x21, [sp, #64]\n\t"
"add sp, sp, #80\n\t" // Free stack space, now sp as on entry
".cfi_adjust_cfa_offset -80\n\t"
@@ -312,8 +311,9 @@ class StubTest : public CommonRuntimeTest {
// -fstack-protector-strong. According to AAPCS64 registers x9-x15 are caller-saved,
// which means we should unclobber one of the callee-saved registers that are unused.
// Here we use x20.
+ // http://b/72613441, Clang 7.0 asks for one more register, so we do not reserve x21.
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "x19",
- "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
+ "x22", "x23", "x24", "x25", "x26", "x27", "x28", "x30",
"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
diff --git a/runtime/base/array_ref.h b/runtime/base/array_ref.h
index ef86512cf7..2753c81bd5 100644
--- a/runtime/base/array_ref.h
+++ b/runtime/base/array_ref.h
@@ -106,6 +106,12 @@ class ArrayRef {
return *this = ArrayRef(other);
}
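+
+  // Reinterprets an ArrayRef<U> as an ArrayRef of T; the element count scales by
+  // sizeof(U) / sizeof(T). For example (hypothetical usage), casting an ArrayRef<const uint32_t>
+  // of size 4 to an ArrayRef<const uint8_t> yields a view of size 16 over the same bytes.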
+ template <typename U>
+ static ArrayRef Cast(const ArrayRef<U>& src) {
+ return ArrayRef(reinterpret_cast<const T*>(src.data()),
+                    src.size() * sizeof(U) / sizeof(T));
+ }
+
// Destructor.
~ArrayRef() = default;
diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc
index 9f17ad051c..a4c32dd814 100644
--- a/runtime/base/mutex.cc
+++ b/runtime/base/mutex.cc
@@ -74,6 +74,7 @@ Uninterruptible Roles::uninterruptible_;
ReaderWriterMutex* Locks::jni_globals_lock_ = nullptr;
Mutex* Locks::jni_weak_globals_lock_ = nullptr;
ReaderWriterMutex* Locks::dex_lock_ = nullptr;
+Mutex* Locks::native_debug_interface_lock_ = nullptr;
std::vector<BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_;
Atomic<const BaseMutex*> Locks::expected_mutexes_on_weak_ref_access_guard_;
@@ -1073,6 +1074,7 @@ void Locks::Init() {
DCHECK(unexpected_signal_lock_ != nullptr);
DCHECK(user_code_suspension_lock_ != nullptr);
DCHECK(dex_lock_ != nullptr);
+ DCHECK(native_debug_interface_lock_ != nullptr);
} else {
// Create global locks in level order from highest lock level to lowest.
LockLevel current_lock_level = kInstrumentEntrypointsLock;
@@ -1228,6 +1230,10 @@ void Locks::Init() {
DCHECK(unexpected_signal_lock_ == nullptr);
unexpected_signal_lock_ = new Mutex("unexpected signal lock", current_lock_level, true);
+ UPDATE_CURRENT_LOCK_LEVEL(kNativeDebugInterfaceLock);
+ DCHECK(native_debug_interface_lock_ == nullptr);
+ native_debug_interface_lock_ = new Mutex("Native debug interface lock", current_lock_level);
+
UPDATE_CURRENT_LOCK_LEVEL(kLoggingLock);
DCHECK(logging_lock_ == nullptr);
logging_lock_ = new Mutex("logging lock", current_lock_level, true);
diff --git a/runtime/base/mutex.h b/runtime/base/mutex.h
index d541b79a98..bf27b7f17c 100644
--- a/runtime/base/mutex.h
+++ b/runtime/base/mutex.h
@@ -58,10 +58,12 @@ class Thread;
// [1] http://www.drdobbs.com/parallel/use-lock-hierarchies-to-avoid-deadlock/204801163
enum LockLevel {
kLoggingLock = 0,
+ kNativeDebugInterfaceLock,
kSwapMutexesLock,
kUnexpectedSignalLock,
kThreadSuspendCountLock,
kAbortLock,
+ kSignalHandlingLock,
kJdwpAdbStateLock,
kJdwpSocketLock,
kRegionSpaceRegionLock,
@@ -745,8 +747,11 @@ class Locks {
// One unexpected signal at a time lock.
static Mutex* unexpected_signal_lock_ ACQUIRED_AFTER(thread_suspend_count_lock_);
+ // Guards the magic global variables used by native tools (e.g. libunwind).
+ static Mutex* native_debug_interface_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+
// Have an exclusive logging thread.
- static Mutex* logging_lock_ ACQUIRED_AFTER(unexpected_signal_lock_);
+ static Mutex* logging_lock_ ACQUIRED_AFTER(native_debug_interface_lock_);
// List of mutexes that we expect a thread may hold when accessing weak refs. This is used to
// avoid a deadlock in the empty checkpoint while weak ref access is disabled (b/34964016). If we
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc
index 32d304073c..800427d6ab 100644
--- a/runtime/class_linker.cc
+++ b/runtime/class_linker.cc
@@ -73,6 +73,7 @@
#include "intern_table.h"
#include "interpreter/interpreter.h"
#include "java_vm_ext.h"
+#include "jit/debugger_interface.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/profile_compilation_info.h"
@@ -3432,6 +3433,7 @@ void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
data.weak_root = dex_cache_jweak;
data.dex_file = dex_cache->GetDexFile();
data.class_table = ClassTableForClassLoader(class_loader);
+ RegisterDexFileForNative(self, data.dex_file->Begin());
DCHECK(data.class_table != nullptr);
// Make sure to hold the dex cache live in the class table. This case happens for the boot class
// path dex caches without an image.
@@ -8368,7 +8370,6 @@ mirror::MethodHandle* ClassLinker::ResolveMethodHandleForField(
mirror::MethodHandle* ClassLinker::ResolveMethodHandleForMethod(
Thread* self,
- const DexFile* const dex_file,
const DexFile::MethodHandleItem& method_handle,
ArtMethod* referrer) {
DexFile::MethodHandleType handle_type =
@@ -8492,19 +8493,20 @@ mirror::MethodHandle* ClassLinker::ResolveMethodHandleForMethod(
return nullptr;
}
- Handle<mirror::DexCache> dex_cache(hs.NewHandle(referrer->GetDexCache()));
- Handle<mirror::ClassLoader> class_loader(hs.NewHandle(referrer->GetClassLoader()));
int32_t index = 0;
-
if (receiver_count != 0) {
// Insert receiver
method_params->Set(index++, target_method->GetDeclaringClass());
}
-
- DexFileParameterIterator it(*dex_file, target_method->GetPrototype());
+ DexFileParameterIterator it(*target_method->GetDexFile(), target_method->GetPrototype());
+ Handle<mirror::DexCache> target_method_dex_cache(hs.NewHandle(target_method->GetDexCache()));
+ Handle<mirror::ClassLoader> target_method_class_loader(hs.NewHandle(target_method->GetClassLoader()));
while (it.HasNext()) {
+ DCHECK_LT(index, num_params);
const dex::TypeIndex type_idx = it.GetTypeIdx();
- ObjPtr<mirror::Class> klass = ResolveType(type_idx, dex_cache, class_loader);
+ ObjPtr<mirror::Class> klass = ResolveType(type_idx,
+ target_method_dex_cache,
+ target_method_class_loader);
if (nullptr == klass) {
DCHECK(self->IsExceptionPending());
return nullptr;
@@ -8554,7 +8556,7 @@ ObjPtr<mirror::MethodHandle> ClassLinker::ResolveMethodHandle(Thread* self,
case DexFile::MethodHandleType::kInvokeConstructor:
case DexFile::MethodHandleType::kInvokeDirect:
case DexFile::MethodHandleType::kInvokeInterface:
- return ResolveMethodHandleForMethod(self, dex_file, method_handle, referrer);
+ return ResolveMethodHandleForMethod(self, method_handle, referrer);
}
}
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 3e3425f5ac..16fa1ce801 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -979,7 +979,6 @@ class ClassLinker {
REQUIRES_SHARED(Locks::mutator_lock_);
mirror::MethodHandle* ResolveMethodHandleForMethod(Thread* self,
- const DexFile* const dex_file,
const DexFile::MethodHandleItem& method_handle,
ArtMethod* referrer)
REQUIRES_SHARED(Locks::mutator_lock_);
diff --git a/runtime/dex/compact_dex_file.h b/runtime/dex/compact_dex_file.h
index 1ecff04cba..31aeb27872 100644
--- a/runtime/dex/compact_dex_file.h
+++ b/runtime/dex/compact_dex_file.h
@@ -245,6 +245,12 @@ class CompactDexFile : public DexFile {
static bool IsVersionValid(const uint8_t* magic);
virtual bool IsVersionValid() const OVERRIDE;
+  // TODO: This is completely a guess; we really need to do better. b/72402467
+  // We ask for 64 megabytes, which should be big enough for any realistic dex file.
+ virtual size_t GetDequickenedSize() const OVERRIDE {
+ return 64 * MB;
+ }
+
const Header& GetHeader() const {
return down_cast<const Header&>(DexFile::GetHeader());
}
diff --git a/runtime/dex/dex_file.h b/runtime/dex/dex_file.h
index 7e2fe98923..cf8c840b59 100644
--- a/runtime/dex/dex_file.h
+++ b/runtime/dex/dex_file.h
@@ -456,6 +456,13 @@ class DexFile {
// Returns true if the dex file supports default methods.
virtual bool SupportsDefaultMethods() const = 0;
+ // Returns the maximum size in bytes needed to store an equivalent dex file strictly conforming to
+  // the dex file specification. That is, the size if we wanted to get rid of all the
+ // quickening/compact-dexing/etc.
+ //
+ // TODO This should really be an exact size! b/72402467
+ virtual size_t GetDequickenedSize() const = 0;
+
// Returns the number of string identifiers in the .dex file.
size_t NumStringIds() const {
DCHECK(header_ != nullptr) << GetLocation();
diff --git a/runtime/dex/dex_file_layout.h b/runtime/dex/dex_file_layout.h
index a7b9051f24..793e3b5de7 100644
--- a/runtime/dex/dex_file_layout.h
+++ b/runtime/dex/dex_file_layout.h
@@ -83,7 +83,7 @@ class DexLayoutSection {
}
void CombineSection(uint32_t start_offset, uint32_t end_offset) {
- DCHECK_LT(start_offset, end_offset);
+ DCHECK_LE(start_offset, end_offset);
if (start_offset_ == end_offset_) {
start_offset_ = start_offset;
end_offset_ = end_offset;
diff --git a/runtime/dex/standard_dex_file.h b/runtime/dex/standard_dex_file.h
index 94ef1f2a8e..e0e9f2f11c 100644
--- a/runtime/dex/standard_dex_file.h
+++ b/runtime/dex/standard_dex_file.h
@@ -83,6 +83,10 @@ class StandardDexFile : public DexFile {
uint32_t GetCodeItemSize(const DexFile::CodeItem& item) const OVERRIDE;
+ virtual size_t GetDequickenedSize() const OVERRIDE {
+ return Size();
+ }
+
private:
StandardDexFile(const uint8_t* base,
size_t size,
diff --git a/runtime/fault_handler.cc b/runtime/fault_handler.cc
index 9d6e5de803..3015b10103 100644
--- a/runtime/fault_handler.cc
+++ b/runtime/fault_handler.cc
@@ -17,6 +17,7 @@
#include "fault_handler.h"
#include <setjmp.h>
+#include <string.h>
#include <sys/mman.h>
#include <sys/ucontext.h>
@@ -183,8 +184,31 @@ bool FaultManager::HandleFaultByOtherHandlers(int sig, siginfo_t* info, void* co
return false;
}
+static const char* SignalCodeName(int sig, int code) {
+ if (sig != SIGSEGV) {
+ return "UNKNOWN";
+ } else {
+ switch (code) {
+ case SEGV_MAPERR: return "SEGV_MAPERR";
+ case SEGV_ACCERR: return "SEGV_ACCERR";
+ default: return "UNKNOWN";
+ }
+ }
+}
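+
+// Writes the interesting siginfo fields, producing output along the lines of (values here are
+// hypothetical):
+//   si_signo: 11 (Segmentation fault)
+//   si_code: 1 (SEGV_MAPERR)
+//   si_addr: 0xdeadd000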
+static std::ostream& PrintSignalInfo(std::ostream& os, siginfo_t* info) {
+ os << " si_signo: " << info->si_signo << " (" << strsignal(info->si_signo) << ")\n"
+ << " si_code: " << info->si_code
+ << " (" << SignalCodeName(info->si_signo, info->si_code) << ")";
+ if (info->si_signo == SIGSEGV) {
+ os << "\n" << " si_addr: " << info->si_addr;
+ }
+ return os;
+}
+
bool FaultManager::HandleFault(int sig, siginfo_t* info, void* context) {
- VLOG(signals) << "Handling fault";
+ if (VLOG_IS_ON(signals)) {
+ PrintSignalInfo(VLOG_STREAM(signals) << "Handling fault:" << "\n", info);
+ }
#ifdef TEST_NESTED_SIGNAL
// Simulate a crash in a handler.
diff --git a/runtime/gc/allocator/dlmalloc.cc b/runtime/gc/allocator/dlmalloc.cc
index 65062208d6..4570e9c1b8 100644
--- a/runtime/gc/allocator/dlmalloc.cc
+++ b/runtime/gc/allocator/dlmalloc.cc
@@ -37,6 +37,7 @@ static void art_heap_usage_error(const char* function, void* p);
#pragma GCC diagnostic ignored "-Wredundant-decls"
#pragma GCC diagnostic ignored "-Wempty-body"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
+#pragma GCC diagnostic ignored "-Wnull-pointer-arithmetic"
#include "../../../external/dlmalloc/malloc.c"
// Note: malloc.c uses a DEBUG define to drive debug code. This interferes with the DEBUG severity
// of libbase, so undefine it now.
diff --git a/runtime/gc/allocator/dlmalloc.h b/runtime/gc/allocator/dlmalloc.h
index 29b96ee96c..b12691ad0e 100644
--- a/runtime/gc/allocator/dlmalloc.h
+++ b/runtime/gc/allocator/dlmalloc.h
@@ -32,6 +32,7 @@
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wredundant-decls"
+#pragma GCC diagnostic ignored "-Wnull-pointer-arithmetic"
#include "../../external/dlmalloc/malloc.h"
#pragma GCC diagnostic pop
diff --git a/runtime/gc/heap-inl.h b/runtime/gc/heap-inl.h
index 52dd104ac8..6735961591 100644
--- a/runtime/gc/heap-inl.h
+++ b/runtime/gc/heap-inl.h
@@ -106,8 +106,8 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
} else {
- // bytes allocated that takes bulk thread-local buffer allocations into account.
- size_t bytes_tl_bulk_allocated = 0;
+ // Bytes allocated that takes bulk thread-local buffer allocations into account.
+ size_t bytes_tl_bulk_allocated = 0u;
obj = TryToAllocate<kInstrumented, false>(self, allocator, byte_count, &bytes_allocated,
&usable_size, &bytes_tl_bulk_allocated);
if (UNLIKELY(obj == nullptr)) {
@@ -154,12 +154,13 @@ inline mirror::Object* Heap::AllocObjectWithAllocator(Thread* self,
}
pre_fence_visitor(obj, usable_size);
QuasiAtomic::ThreadFenceForConstructor();
- new_num_bytes_allocated = num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated) +
- bytes_tl_bulk_allocated;
+ size_t num_bytes_allocated_before =
+ num_bytes_allocated_.FetchAndAddRelaxed(bytes_tl_bulk_allocated);
+ new_num_bytes_allocated = num_bytes_allocated_before + bytes_tl_bulk_allocated;
if (bytes_tl_bulk_allocated > 0) {
// Only trace when we get an increase in the number of bytes allocated. This happens when
// obtaining a new TLAB and isn't often enough to hurt performance according to golem.
- TraceHeapSize(new_num_bytes_allocated + bytes_tl_bulk_allocated);
+ TraceHeapSize(new_num_bytes_allocated);
}
}
if (kIsDebugBuild && Runtime::Current()->IsStarted()) {
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index b1932d1a29..cf5bd4aed2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -1890,7 +1890,10 @@ HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
count_requested_homogeneous_space_compaction_++;
// Store performed homogeneous space compaction at a new request arrival.
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
- Locks::mutator_lock_->AssertNotHeld(self);
+ // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
+ // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
+ // http://b/71769596
+ // Locks::mutator_lock_->AssertNotHeld(self);
{
ScopedThreadStateChange tsc2(self, kWaitingForGcToComplete);
MutexLock mu(self, *gc_complete_lock_);
@@ -1968,7 +1971,10 @@ void Heap::TransitionCollector(CollectorType collector_type) {
Runtime* const runtime = Runtime::Current();
Thread* const self = Thread::Current();
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
- Locks::mutator_lock_->AssertNotHeld(self);
+ // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
+ // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
+ // http://b/71769596
+ // Locks::mutator_lock_->AssertNotHeld(self);
// Busy wait until we can GC (StartGC can fail if we have a non-zero
 // compacting_gc_disable_count_, this should rarely occur).
for (;;) {
@@ -2511,7 +2517,10 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type,
}
}
ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
- Locks::mutator_lock_->AssertNotHeld(self);
+ // TODO: Clang prebuilt for r316199 produces bogus thread safety analysis warning for holding both
+ // exclusive and shared lock in the same scope. Remove the assertion as a temporary workaround.
+ // http://b/71769596
+ // Locks::mutator_lock_->AssertNotHeld(self);
if (self->IsHandlingStackOverflow()) {
// If we are throwing a stack overflow error we probably don't have enough remaining stack
// space to run the GC.
diff --git a/runtime/gc/heap_test.cc b/runtime/gc/heap_test.cc
index 6d426c2dd0..9d8e5d23eb 100644
--- a/runtime/gc/heap_test.cc
+++ b/runtime/gc/heap_test.cc
@@ -81,6 +81,7 @@ class ZygoteHeapTest : public CommonRuntimeTest {
void SetUpRuntimeOptions(RuntimeOptions* options) {
CommonRuntimeTest::SetUpRuntimeOptions(options);
options->push_back(std::make_pair("-Xzygote", nullptr));
+ options->push_back(std::make_pair("-Xno-hidden-api-checks", nullptr));
}
};
diff --git a/runtime/hidden_api.h b/runtime/hidden_api.h
index f476028d5f..05e68e66dd 100644
--- a/runtime/hidden_api.h
+++ b/runtime/hidden_api.h
@@ -27,6 +27,7 @@ namespace hiddenapi {
enum Action {
kAllow,
kAllowButWarn,
+ kAllowButWarnAndToast,
kDeny
};
@@ -35,8 +36,9 @@ inline Action GetMemberAction(uint32_t access_flags) {
case HiddenApiAccessFlags::kWhitelist:
return kAllow;
case HiddenApiAccessFlags::kLightGreylist:
- case HiddenApiAccessFlags::kDarkGreylist:
return kAllowButWarn;
+ case HiddenApiAccessFlags::kDarkGreylist:
+ return kAllowButWarnAndToast;
case HiddenApiAccessFlags::kBlacklist:
return kDeny;
}
@@ -70,8 +72,9 @@ inline bool ShouldBlockAccessToMember(T* member,
std::function<bool(Thread*)> fn_caller_in_boot)
REQUIRES_SHARED(Locks::mutator_lock_) {
DCHECK(member != nullptr);
+ Runtime* runtime = Runtime::Current();
- if (!Runtime::Current()->AreHiddenApiChecksEnabled()) {
+ if (!runtime->AreHiddenApiChecksEnabled()) {
// Exit early. Nothing to enforce.
return false;
}
@@ -90,20 +93,23 @@ inline bool ShouldBlockAccessToMember(T* member,
}
// Member is hidden and we are not in the boot class path. Act accordingly.
- if (action == kAllowButWarn) {
+ if (action == kDeny) {
+ return true;
+ } else {
+ DCHECK(action == kAllowButWarn || action == kAllowButWarnAndToast);
+
// Allow access to this member but print a warning. Depending on a runtime
// flag, we might move the member into whitelist and skip the warning the
// next time the member is used.
- Runtime::Current()->SetPendingHiddenApiWarning(true);
- if (Runtime::Current()->ShouldDedupeHiddenApiWarnings()) {
+ if (runtime->ShouldDedupeHiddenApiWarnings()) {
member->SetAccessFlags(HiddenApiAccessFlags::EncodeForRuntime(
member->GetAccessFlags(), HiddenApiAccessFlags::kWhitelist));
}
WarnAboutMemberAccess(member);
+ if (action == kAllowButWarnAndToast || runtime->ShouldAlwaysSetHiddenApiWarningFlag()) {
+      runtime->SetPendingHiddenApiWarning(true);
+ }
return false;
- } else {
- DCHECK_EQ(action, hiddenapi::kDeny);
- return true;
}
}
diff --git a/runtime/instrumentation.cc b/runtime/instrumentation.cc
index 2101f6837b..24cedb093b 100644
--- a/runtime/instrumentation.cc
+++ b/runtime/instrumentation.cc
@@ -1384,8 +1384,8 @@ TwoWordReturn Instrumentation::PopInstrumentationStackFrame(Thread* self,
reinterpret_cast<uintptr_t>(GetQuickDeoptimizationEntryPoint()));
} else {
if (deoptimize && !Runtime::Current()->IsAsyncDeoptimizeable(*return_pc)) {
- LOG(WARNING) << "Got a deoptimization request on un-deoptimizable " << method->PrettyMethod()
- << " at PC " << reinterpret_cast<void*>(*return_pc);
+ VLOG(deopt) << "Got a deoptimization request on un-deoptimizable " << method->PrettyMethod()
+ << " at PC " << reinterpret_cast<void*>(*return_pc);
}
if (kVerboseInstrumentation) {
LOG(INFO) << "Returning from " << method->PrettyMethod()
diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc
index d53da215a2..12b8c38bbb 100644
--- a/runtime/interpreter/interpreter_common.cc
+++ b/runtime/interpreter/interpreter_common.cc
@@ -1183,10 +1183,9 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
}
Handle<mirror::Object> object(hs.NewHandle(result.GetL()));
-
- // Check the result is not null.
if (UNLIKELY(object.IsNull())) {
- ThrowNullPointerException("CallSite == null");
+ // This will typically be for LambdaMetafactory which is not supported.
+ ThrowNullPointerException("Bootstrap method returned null");
return nullptr;
}
@@ -1202,7 +1201,7 @@ static ObjPtr<mirror::CallSite> InvokeBootstrapMethod(Thread* self,
// Check the call site target is not null as we're going to invoke it.
Handle<mirror::MethodHandle> target = hs.NewHandle(call_site->GetTarget());
if (UNLIKELY(target.IsNull())) {
- ThrowNullPointerException("CallSite target == null");
+ ThrowNullPointerException("Target for call-site is null");
return nullptr;
}
diff --git a/runtime/jit/debugger_interface.cc b/runtime/jit/debugger_interface.cc
index 0e295e2442..d60f70a54f 100644
--- a/runtime/jit/debugger_interface.cc
+++ b/runtime/jit/debugger_interface.cc
@@ -68,11 +68,65 @@ extern "C" {
// Static initialization is necessary to prevent GDB from seeing
// uninitialized descriptor.
JITDescriptor __jit_debug_descriptor = { 1, JIT_NOACTION, nullptr, nullptr };
+
+ // Incremented whenever __jit_debug_descriptor is modified.
+ uint32_t __jit_debug_descriptor_timestamp = 0;
+
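+  // Entry in an intrusive doubly-linked list of loaded dex files. Native tools (e.g. libunwind)
+  // can walk the list via the exported __art_debug_dexfiles root and use
+  // __art_debug_dexfiles_timestamp to detect changes, so this layout must stay in sync with them.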
+ struct DEXFileEntry {
+ DEXFileEntry* next_;
+ DEXFileEntry* prev_;
+ const void* dexfile_;
+ };
+
+ DEXFileEntry* __art_debug_dexfiles = nullptr;
+
+ // Incremented whenever __art_debug_dexfiles is modified.
+ uint32_t __art_debug_dexfiles_timestamp = 0;
}
-Mutex g_jit_debug_mutex("JIT debug interface lock", kJitDebugInterfaceLock);
+static size_t g_jit_debug_mem_usage
+ GUARDED_BY(Locks::native_debug_interface_lock_) = 0;
+
+static std::unordered_map<const void*, DEXFileEntry*> g_dexfile_entries
+ GUARDED_BY(Locks::native_debug_interface_lock_);
+
+void RegisterDexFileForNative(Thread* current_thread, const void* dexfile_header) {
+ MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
+ if (g_dexfile_entries.count(dexfile_header) == 0) {
+ DEXFileEntry* entry = new DEXFileEntry();
+ CHECK(entry != nullptr);
+ entry->dexfile_ = dexfile_header;
+ entry->prev_ = nullptr;
+ entry->next_ = __art_debug_dexfiles;
+ if (entry->next_ != nullptr) {
+ entry->next_->prev_ = entry;
+ }
+ __art_debug_dexfiles = entry;
+ __art_debug_dexfiles_timestamp++;
+ g_dexfile_entries.emplace(dexfile_header, entry);
+ }
+}
-static size_t g_jit_debug_mem_usage = 0;
+void DeregisterDexFileForNative(Thread* current_thread, const void* dexfile_header) {
+ MutexLock mu(current_thread, *Locks::native_debug_interface_lock_);
+ auto it = g_dexfile_entries.find(dexfile_header);
+ // We register dex files in the class linker and free them in DexFile_closeDexFile,
+  // but there might be cases where we load the dex file without using it in the class linker.
+ if (it != g_dexfile_entries.end()) {
+ DEXFileEntry* entry = it->second;
+ if (entry->prev_ != nullptr) {
+ entry->prev_->next_ = entry->next_;
+ } else {
+ __art_debug_dexfiles = entry->next_;
+ }
+ if (entry->next_ != nullptr) {
+ entry->next_->prev_ = entry->prev_;
+ }
+ __art_debug_dexfiles_timestamp++;
+ delete entry;
+ g_dexfile_entries.erase(it);
+ }
+}
JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile) {
DCHECK_NE(symfile.size(), 0u);
@@ -96,6 +150,7 @@ JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile) {
__jit_debug_descriptor.first_entry_ = entry;
__jit_debug_descriptor.relevant_entry_ = entry;
__jit_debug_descriptor.action_flag_ = JIT_REGISTER_FN;
+ __jit_debug_descriptor_timestamp++;
(*__jit_debug_register_code_ptr)();
return entry;
}
@@ -114,6 +169,7 @@ void DeleteJITCodeEntry(JITCodeEntry* entry) {
g_jit_debug_mem_usage -= sizeof(JITCodeEntry) + entry->symfile_size_;
__jit_debug_descriptor.relevant_entry_ = entry;
__jit_debug_descriptor.action_flag_ = JIT_UNREGISTER_FN;
+ __jit_debug_descriptor_timestamp++;
(*__jit_debug_register_code_ptr)();
delete[] entry->symfile_addr_;
delete entry;
@@ -121,7 +177,7 @@ void DeleteJITCodeEntry(JITCodeEntry* entry) {
// Mapping from code address to entry. Used to manage life-time of the entries.
static std::unordered_map<uintptr_t, JITCodeEntry*> g_jit_code_entries
- GUARDED_BY(g_jit_debug_mutex);
+ GUARDED_BY(Locks::native_debug_interface_lock_);
void IncrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address) {
DCHECK(entry != nullptr);
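
The new __art_debug_dexfiles list mirrors the shape of the GDB JIT interface entries above: a doubly-linked list plus a timestamp that is bumped on every mutation, so a tool can detect that the list changed under it. As a hedged illustration (the symbol names and struct layout come from this patch, but the reader loop is hypothetical and not part of ART — a real debugger would read these symbols out of the remote process rather than linking against them), a consumer could snapshot the list like this:

#include <cstdint>
#include <vector>

struct DEXFileEntry {
  DEXFileEntry* next_;
  DEXFileEntry* prev_;
  const void* dexfile_;  // Start of the in-memory dex data.
};

extern "C" DEXFileEntry* __art_debug_dexfiles;
extern "C" uint32_t __art_debug_dexfiles_timestamp;

std::vector<const void*> SnapshotDexFiles() {
  std::vector<const void*> result;
  uint32_t stamp;
  do {
    stamp = __art_debug_dexfiles_timestamp;
    result.clear();
    for (DEXFileEntry* e = __art_debug_dexfiles; e != nullptr; e = e->next_) {
      result.push_back(e->dexfile_);
    }
  } while (stamp != __art_debug_dexfiles_timestamp);  // List mutated mid-walk; retry.
  return result;
}
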
diff --git a/runtime/jit/debugger_interface.h b/runtime/jit/debugger_interface.h
index 9aec988f67..8c4bb3fdf4 100644
--- a/runtime/jit/debugger_interface.h
+++ b/runtime/jit/debugger_interface.h
@@ -30,36 +30,42 @@ extern "C" {
struct JITCodeEntry;
}
-extern Mutex g_jit_debug_mutex;
+// Notify native tools (e.g. libunwind) that a DEX file has been opened.
+// The pointer needs to point to the start of the dex data (not the DexFile* object).
+void RegisterDexFileForNative(Thread* current_thread, const void* dexfile_header);
+
+// Notify native tools (e.g. libunwind) that a DEX file has been closed.
+// The pointer needs to point to the start of the dex data (not the DexFile* object).
+void DeregisterDexFileForNative(Thread* current_thread, const void* dexfile_header);
// Notify native debugger about new JITed code by passing in-memory ELF.
// It takes ownership of the in-memory ELF file.
JITCodeEntry* CreateJITCodeEntry(const std::vector<uint8_t>& symfile)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Notify native debugger that JITed code has been removed.
// It also releases the associated in-memory ELF file.
void DeleteJITCodeEntry(JITCodeEntry* entry)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Helper method to track life-time of JITCodeEntry.
// It registers given code address as being described by the given entry.
void IncrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Helper method to track life-time of JITCodeEntry.
// It de-registers given code address as being described by the given entry.
void DecrementJITCodeEntryRefcount(JITCodeEntry* entry, uintptr_t code_address)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Find the registered JITCodeEntry for given code address.
// There can be only one entry per address at any given time.
JITCodeEntry* GetJITCodeEntry(uintptr_t code_address)
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
// Returns approximate memory used by all JITCodeEntries.
size_t GetJITCodeEntryMemUsage()
- REQUIRES(g_jit_debug_mutex);
+ REQUIRES(Locks::native_debug_interface_lock_);
} // namespace art
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 7f0447732e..c8c13cb20f 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -549,7 +549,7 @@ void JitCodeCache::FreeCode(const void* code_ptr) {
uintptr_t allocation = FromCodeToAllocation(code_ptr);
// Notify native debugger that we are about to remove the code.
// It does nothing if we are not using native debugger.
- MutexLock mu(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu(Thread::Current(), *Locks::native_debug_interface_lock_);
JITCodeEntry* entry = GetJITCodeEntry(reinterpret_cast<uintptr_t>(code_ptr));
if (entry != nullptr) {
DecrementJITCodeEntryRefcount(entry, reinterpret_cast<uintptr_t>(code_ptr));
@@ -1825,7 +1825,7 @@ void JitCodeCache::FreeData(uint8_t* data) {
void JitCodeCache::Dump(std::ostream& os) {
MutexLock mu(Thread::Current(), lock_);
- MutexLock mu2(Thread::Current(), g_jit_debug_mutex);
+ MutexLock mu2(Thread::Current(), *Locks::native_debug_interface_lock_);
os << "Current JIT code cache size: " << PrettySize(used_memory_for_code_) << "\n"
<< "Current JIT data cache size: " << PrettySize(used_memory_for_data_) << "\n"
<< "Current JIT mini-debug-info size: " << PrettySize(GetJITCodeEntryMemUsage()) << "\n"
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 55e9c390cd..26acef06d6 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -396,6 +396,91 @@ MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
}
+template<typename A, typename B>
+static ptrdiff_t PointerDiff(A* a, B* b) {
+ return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b));
+}
+
+bool MemMap::ReplaceWith(MemMap** source_ptr, /*out*/std::string* error) {
+#if !HAVE_MREMAP_SYSCALL
+ UNUSED(source_ptr);
+ *error = "Cannot perform atomic replace because we are missing the required mremap syscall";
+ return false;
+#else // !HAVE_MREMAP_SYSCALL
+ CHECK(source_ptr != nullptr);
+ CHECK(*source_ptr != nullptr);
+ if (!MemMap::kCanReplaceMapping) {
+ *error = "Unable to perform atomic replace due to runtime environment!";
+ return false;
+ }
+ MemMap* source = *source_ptr;
+ // Neither mapping may be a 'reused' mapping.
+ if (source->reuse_ || reuse_) {
+ *error = "One or both mappings is not a real mmap!";
+ return false;
+ }
+ // TODO: Support redzones.
+ if (source->redzone_size_ != 0 || redzone_size_ != 0) {
+ *error = "source and dest have different redzone sizes";
+ return false;
+ }
+ // Make sure they have the same offset from the actual mmap'd address.
+ if (PointerDiff(BaseBegin(), Begin()) != PointerDiff(source->BaseBegin(), source->Begin())) {
+ *error =
+ "source starts at a different offset from the mmap. Cannot atomically replace mappings";
+ return false;
+ }
+ // mremap doesn't allow the new [start, end] range to overlap the old [start, end] range (the
+ // same restriction as memcpy, except here the check is explicit and actually enforced).
+ if (source->BaseBegin() > BaseBegin() &&
+ reinterpret_cast<uint8_t*>(BaseBegin()) + source->BaseSize() >
+ reinterpret_cast<uint8_t*>(source->BaseBegin())) {
+ *error = "destination memory pages overlap with source memory pages";
+ return false;
+ }
+ // Change the protection to match the new location.
+ int old_prot = source->GetProtect();
+ if (!source->Protect(GetProtect())) {
+ *error = "Could not change protections for source to those required for dest.";
+ return false;
+ }
+
+ // Do the mremap.
+ void* res = mremap(/*old_address*/source->BaseBegin(),
+ /*old_size*/source->BaseSize(),
+ /*new_size*/source->BaseSize(),
+ /*flags*/MREMAP_MAYMOVE | MREMAP_FIXED,
+ /*new_address*/BaseBegin());
+ if (res == MAP_FAILED) {
+ int saved_errno = errno;
+ // Wasn't able to move mapping. Change the protection of source back to the original one and
+ // return.
+ source->Protect(old_prot);
+ *error = std::string("Failed to mremap source to dest. Error was ") + strerror(saved_errno);
+ return false;
+ }
+ CHECK(res == BaseBegin());
+
+ // The new base_size covers all the pages of 'source' plus any remaining dest pages; the extra
+ // dest pages are unmapped by SetSize below.
+ size_t new_base_size = std::max(source->base_size_, base_size_);
+
+ // Delete the old source, but don't unmap it (set already_unmapped_) since its pages are already gone.
+ *source_ptr = nullptr;
+ size_t source_size = source->size_;
+ source->already_unmapped_ = true;
+ delete source;
+ source = nullptr;
+
+ size_ = source_size;
+ base_size_ = new_base_size;
+ // Reduce base_size if needed (this will unmap the extra pages).
+ SetSize(source_size);
+
+ return true;
+#endif // !HAVE_MREMAP_SYSCALL
+}
+
MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
size_t byte_count,
int prot,
@@ -499,9 +584,11 @@ MemMap::~MemMap() {
if (!reuse_) {
MEMORY_TOOL_MAKE_UNDEFINED(base_begin_, base_size_);
- int result = munmap(base_begin_, base_size_);
- if (result == -1) {
- PLOG(FATAL) << "munmap failed";
+ if (!already_unmapped_) {
+ int result = munmap(base_begin_, base_size_);
+ if (result == -1) {
+ PLOG(FATAL) << "munmap failed";
+ }
}
}
@@ -523,7 +610,7 @@ MemMap::~MemMap() {
MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
size_t base_size, int prot, bool reuse, size_t redzone_size)
: name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
- prot_(prot), reuse_(reuse), redzone_size_(redzone_size) {
+ prot_(prot), reuse_(reuse), already_unmapped_(false), redzone_size_(redzone_size) {
if (size_ == 0) {
CHECK(begin_ == nullptr);
CHECK(base_begin_ == nullptr);
@@ -794,19 +881,21 @@ void MemMap::Shutdown() {
}
void MemMap::SetSize(size_t new_size) {
- if (new_size == base_size_) {
+ CHECK_LE(new_size, size_);
+ size_t new_base_size = RoundUp(new_size + static_cast<size_t>(PointerDiff(Begin(), BaseBegin())),
+ kPageSize);
+ if (new_base_size == base_size_) {
+ size_ = new_size;
return;
}
- CHECK_ALIGNED(new_size, kPageSize);
- CHECK_EQ(base_size_, size_) << "Unsupported";
- CHECK_LE(new_size, base_size_);
+ CHECK_LT(new_base_size, base_size_);
MEMORY_TOOL_MAKE_UNDEFINED(
reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) +
- new_size),
- base_size_ - new_size);
- CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_size),
- base_size_ - new_size), 0) << new_size << " " << base_size_;
- base_size_ = new_size;
+ new_base_size),
+ base_size_ - new_base_size);
+ CHECK_EQ(munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(BaseBegin()) + new_base_size),
+ base_size_ - new_base_size), 0) << new_base_size << " " << base_size_;
+ base_size_ = new_base_size;
size_ = new_size;
}
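
ReplaceWith above hinges on one property of mremap: with MREMAP_FIXED | MREMAP_MAYMOVE, the kernel atomically unmaps whatever is at new_address and moves the old pages there in a single syscall, with the moved pages keeping their original protection — which is why ReplaceWith adjusts the source's protection to match the dest first. A minimal standalone sketch of that primitive, independent of the MemMap class (assumes Linux and, for brevity, a 4 KiB page size):

#include <sys/mman.h>  // g++ predefines _GNU_SOURCE, which exposes mremap here.
#include <cassert>
#include <cstring>

int main() {
  const size_t kPage = 4096;  // Assumed page size for this sketch.
  char* src = static_cast<char*>(mmap(nullptr, kPage, PROT_READ | PROT_WRITE,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  char* dst = static_cast<char*>(mmap(nullptr, kPage, PROT_READ,
                                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  memset(src, 0x5a, kPage);
  // After this call the old src range is unmapped and dst names the
  // 0x5a-filled pages, now readable and writable like src was.
  void* res = mremap(src, kPage, kPage, MREMAP_MAYMOVE | MREMAP_FIXED, dst);
  assert(res == dst);
  assert(dst[0] == 0x5a);
  munmap(dst, kPage);
  return 0;
}

If dst overlapped src, the syscall would fail with EINVAL, which is what the explicit overlap guard in ReplaceWith rejects up front.
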
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 5603963eac..0ecb414614 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -39,8 +39,12 @@ namespace art {
#ifdef __linux__
static constexpr bool kMadviseZeroes = true;
+#define HAVE_MREMAP_SYSCALL true
#else
static constexpr bool kMadviseZeroes = false;
+// We cannot ever perform MemMap::ReplaceWith on non-linux hosts since the syscall is not
+// present.
+#define HAVE_MREMAP_SYSCALL false
#endif
// Used to keep track of mmap segments.
@@ -52,6 +56,32 @@ static constexpr bool kMadviseZeroes = false;
// Otherwise, calls might see uninitialized values.
class MemMap {
public:
+ static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;
+
+ // Replace the data in this memmap with the data in the memmap pointed to by source. The caller
+ // relinquishes ownership of the source mmap.
+ //
+ // For the call to be successful:
+ // * The range [dest->Begin(), dest->Begin() + source->Size()] must not overlap with
+ // [source->Begin(), source->End()].
+ // * Neither source nor dest may be 'reused' mappings (they must own all the pages associated
+ // with them).
+ // * kCanReplaceMapping must be true.
+ // * Neither source nor dest may use manual redzones.
+ // * Both source and dest must have the same offset from the nearest page boundary.
+ // * mremap must succeed when called on the mappings.
+ //
+ // If this call succeeds it will return true and:
+ // * deallocate *source,
+ // * set *source to nullptr,
+ // * leave the protection of this unchanged,
+ // * set the size of this to the size of the source, and
+ // * fill this with the data from source.
+ //
+ // If this call fails it will return false and make no changes to *source or this. The ownership
+ // of the source mmap is returned to the caller.
+ bool ReplaceWith(/*in-out*/MemMap** source, /*out*/std::string* error);
+
// Request an anonymous region of length 'byte_count' and a requested base address.
// Use null as the requested base address if you don't care.
// "reuse" allows re-mapping an address range from an existing mapping.
@@ -246,6 +276,9 @@ class MemMap {
// unmapping.
const bool reuse_;
+ // When already_unmapped_ is true the destructor will not call munmap.
+ bool already_unmapped_;
+
const size_t redzone_size_;
#if USE_ART_LOW_4G_ALLOCATOR
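
For reference, a hedged usage sketch of the contract above, mirroring the MapAnonymous call pattern the new mem_map_test.cc cases use; the failure branch is the part worth noting, since ownership of source stays with the caller there:

std::string error_msg;
std::unique_ptr<MemMap> dest(MemMap::MapAnonymous(
    "replace-dest", nullptr, kPageSize, PROT_READ, false, false, &error_msg));
MemMap* source = MemMap::MapAnonymous(
    "replace-source", nullptr, kPageSize, PROT_READ | PROT_WRITE, false, false, &error_msg);
if (dest->ReplaceWith(&source, &error_msg)) {
  CHECK(source == nullptr);   // Ownership transferred; dest now holds source's data.
} else {
  LOG(WARNING) << error_msg;  // Failed: source is untouched and still owned here.
  delete source;
}
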
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index a4ebb16d09..3adbf18a7a 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -19,6 +19,7 @@
#include <sys/mman.h>
#include <memory>
+#include <random>
#include "base/memory_tool.h"
#include "base/unix_file/fd_file.h"
@@ -36,6 +37,25 @@ class MemMapTest : public CommonRuntimeTest {
return mem_map->base_size_;
}
+ static bool IsAddressMapped(void* addr) {
+ bool res = msync(addr, 1, MS_SYNC) == 0;
+ if (!res && errno != ENOMEM) {
+ PLOG(FATAL) << "Unexpected error occurred on msync";
+ }
+ return res;
+ }
+
+ static std::vector<uint8_t> RandomData(size_t size) {
+ std::random_device rd;
+ std::uniform_int_distribution<uint8_t> dist;
+ std::vector<uint8_t> res;
+ res.resize(size);
+ for (size_t i = 0; i < size; i++) {
+ res[i] = dist(rd);
+ }
+ return res;
+ }
+
static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
// Find a valid map address and unmap it before returning.
std::string error_msg;
@@ -143,6 +163,186 @@ TEST_F(MemMapTest, Start) {
}
#endif
+// We need mremap to be able to test ReplaceMapping at all.
+#if HAVE_MREMAP_SYSCALL
+TEST_F(MemMapTest, ReplaceMapping_SameSize) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ kPageSize,
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ nullptr,
+ kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ void* source_addr = source->Begin();
+ void* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+
+ std::vector<uint8_t> data = RandomData(kPageSize);
+ memcpy(source->Begin(), data.data(), data.size());
+
+ ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_FALSE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_TRUE(source == nullptr);
+
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+
+ ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+}
+
+TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ 5 * kPageSize, // Need to make it larger
+ // initially so we know
+ // there won't be mappings
+ // in the way when we move
+ // source.
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ nullptr,
+ 3 * kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ uint8_t* source_addr = source->Begin();
+ uint8_t* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+
+ // Fill the source with random data.
+ std::vector<uint8_t> data = RandomData(3 * kPageSize);
+ memcpy(source->Begin(), data.data(), data.size());
+
+ // Make the dest smaller so that we know we'll have space.
+ dest->SetSize(kPageSize);
+
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+
+ ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_FALSE(IsAddressMapped(source_addr));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_TRUE(source == nullptr);
+
+ ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+}
+
+TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ 3 * kPageSize,
+ PROT_READ,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ nullptr,
+ kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ uint8_t* source_addr = source->Begin();
+ uint8_t* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+
+ std::vector<uint8_t> data = RandomData(kPageSize);
+ memcpy(source->Begin(), data.data(), kPageSize);
+
+ ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_FALSE(IsAddressMapped(source_addr));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
+ ASSERT_TRUE(source == nullptr);
+
+ ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+}
+
+TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
+ std::string error_msg;
+ std::unique_ptr<MemMap> dest(
+ MemMap::MapAnonymous(
+ "MapAnonymousEmpty-atomic-replace-dest",
+ nullptr,
+ 3 * kPageSize, // Need to make it larger initially so we know there won't be mappings in
+ // the way when we move source.
+ PROT_READ | PROT_WRITE,
+ false,
+ false,
+ &error_msg));
+ ASSERT_TRUE(dest != nullptr);
+ // Resize down to 1 page so we can remap the rest.
+ dest->SetSize(kPageSize);
+ // Create source from the last 2 pages.
+ MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+ dest->Begin() + kPageSize,
+ 2 * kPageSize,
+ PROT_WRITE | PROT_READ,
+ false,
+ false,
+ &error_msg);
+ ASSERT_TRUE(source != nullptr);
+ MemMap* orig_source = source;
+ ASSERT_EQ(dest->Begin() + kPageSize, source->Begin());
+ uint8_t* source_addr = source->Begin();
+ uint8_t* dest_addr = dest->Begin();
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+
+ // Fill the source and dest with random data.
+ std::vector<uint8_t> data = RandomData(2 * kPageSize);
+ memcpy(source->Begin(), data.data(), data.size());
+ std::vector<uint8_t> dest_data = RandomData(kPageSize);
+ memcpy(dest->Begin(), dest_data.data(), dest_data.size());
+
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+
+ ASSERT_FALSE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+
+ ASSERT_TRUE(source == orig_source);
+ ASSERT_TRUE(IsAddressMapped(source_addr));
+ ASSERT_TRUE(IsAddressMapped(dest_addr));
+ ASSERT_EQ(source->Size(), data.size());
+ ASSERT_EQ(dest->Size(), dest_data.size());
+
+ ASSERT_EQ(memcmp(source->Begin(), data.data(), data.size()), 0);
+ ASSERT_EQ(memcmp(dest->Begin(), dest_data.data(), dest_data.size()), 0);
+
+ delete source;
+}
+#endif // HAVE_MREMAP_SYSCALL
+
TEST_F(MemMapTest, MapAnonymousEmpty) {
CommonInit();
std::string error_msg;
diff --git a/runtime/native/dalvik_system_DexFile.cc b/runtime/native/dalvik_system_DexFile.cc
index 0f430874cf..6ea9a7ad62 100644
--- a/runtime/native/dalvik_system_DexFile.cc
+++ b/runtime/native/dalvik_system_DexFile.cc
@@ -30,6 +30,7 @@
#include "dex/art_dex_file_loader.h"
#include "dex/dex_file-inl.h"
#include "dex/dex_file_loader.h"
+#include "jit/debugger_interface.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
@@ -331,6 +332,7 @@ static jboolean DexFile_closeDexFile(JNIEnv* env, jclass, jobject cookie) {
int32_t i = kDexFileIndexStart; // Oat file is at index 0.
for (const DexFile* dex_file : dex_files) {
if (dex_file != nullptr) {
+ DeregisterDexFileForNative(soa.Self(), dex_file->Begin());
// Only delete the dex file if the dex cache is not found to prevent runtime crashes if there
// are calls to DexFile.close while the ART DexFile is still in use.
if (!class_linker->IsDexFileRegistered(soa.Self(), *dex_file)) {
diff --git a/runtime/native/dalvik_system_ZygoteHooks.cc b/runtime/native/dalvik_system_ZygoteHooks.cc
index e58fd9dac9..648a464b6e 100644
--- a/runtime/native/dalvik_system_ZygoteHooks.cc
+++ b/runtime/native/dalvik_system_ZygoteHooks.cc
@@ -350,6 +350,9 @@ static void ZygoteHooks_nativePostForkChild(JNIEnv* env,
<< "SystemServer should be forked with DISABLE_HIDDEN_API_CHECKS";
Runtime::Current()->SetHiddenApiChecksEnabled(do_hidden_api_checks);
+ // Clear the hidden API warning flag, in case it was set.
+ Runtime::Current()->SetPendingHiddenApiWarning(false);
+
if (instruction_set != nullptr && !is_system_server) {
ScopedUtfChars isa_string(env, instruction_set);
InstructionSet isa = GetInstructionSetFromString(isa_string.c_str());
diff --git a/runtime/native_stack_dump.cc b/runtime/native_stack_dump.cc
index 2fef70b2ae..099d77edaa 100644
--- a/runtime/native_stack_dump.cc
+++ b/runtime/native_stack_dump.cc
@@ -393,6 +393,10 @@ void DumpKernelStack(std::ostream& os, pid_t tid, const char* prefix, bool inclu
std::vector<std::string> kernel_stack_frames;
Split(kernel_stack, '\n', &kernel_stack_frames);
+ if (kernel_stack_frames.empty()) {
+ os << prefix << "(" << kernel_stack_filename << " is empty)\n";
+ return;
+ }
// We skip the last stack frame because it's always equivalent to "[<ffffffff>] 0xffffffff",
// which looking at the source appears to be the kernel's way of saying "that's all, folks!".
kernel_stack_frames.pop_back();
diff --git a/runtime/quicken_info.h b/runtime/quicken_info.h
index 52eca61c06..32f70054ba 100644
--- a/runtime/quicken_info.h
+++ b/runtime/quicken_info.h
@@ -50,16 +50,17 @@ class QuickenInfoOffsetTableAccessor {
return index % kElementsPerIndex == 0;
}
- explicit QuickenInfoOffsetTableAccessor(const uint8_t* data, uint32_t max_index)
- : table_(reinterpret_cast<const uint32_t*>(data)),
- num_indices_(RoundUp(max_index, kElementsPerIndex) / kElementsPerIndex) {}
+ QuickenInfoOffsetTableAccessor(const ArrayRef<const uint8_t>& data, uint32_t max_index)
+ : table_(ArrayRef<const TableType>::Cast(data).SubArray(
+ 0,
+ RoundUp(max_index, kElementsPerIndex) / kElementsPerIndex)) {}
size_t SizeInBytes() const {
return NumIndices() * sizeof(table_[0]);
}
uint32_t NumIndices() const {
- return num_indices_;
+ return table_.size();
}
// Returns the offset for the index at or before the desired index. If the offset is for an index
@@ -69,17 +70,12 @@ class QuickenInfoOffsetTableAccessor {
return table_[index / kElementsPerIndex];
}
- const uint8_t* DataEnd() const {
- return reinterpret_cast<const uint8_t*>(table_ + NumIndices());
- }
-
static uint32_t Alignment() {
return alignof(TableType);
}
private:
- const TableType* table_;
- uint32_t num_indices_;
+ const ArrayRef<const TableType> table_;
};
// QuickenInfoTable is a table of 16 bit dex indices. There is one slot for every instruction that
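
The accessor now wraps a bounds-checked ArrayRef instead of a raw pointer plus a separate count. The underlying sampled-offset-table idea is worth spelling out; a self-contained sketch follows (the kElementsPerIndex value of 4 is illustrative only — the real constant lives in QuickenInfoOffsetTableAccessor):

#include <cstddef>
#include <cstdint>
#include <vector>

// Stores one offset per group of kElementsPerIndex elements. A lookup returns
// the stored offset at or before the requested index; the caller decodes
// forward from there to reach the exact element.
class SampledOffsetTable {
 public:
  static constexpr uint32_t kElementsPerIndex = 4;  // Illustrative value.

  explicit SampledOffsetTable(std::vector<uint32_t> offsets)
      : table_(std::move(offsets)) {}

  uint32_t ElementOffset(uint32_t index) const {
    return table_[index / kElementsPerIndex];
  }

  size_t SizeInBytes() const { return table_.size() * sizeof(table_[0]); }

 private:
  std::vector<uint32_t> table_;
};
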
diff --git a/runtime/runtime.cc b/runtime/runtime.cc
index 5a3a6f0cf4..3afd320f05 100644
--- a/runtime/runtime.cc
+++ b/runtime/runtime.cc
@@ -268,6 +268,7 @@ Runtime::Runtime()
do_hidden_api_checks_(true),
pending_hidden_api_warning_(false),
dedupe_hidden_api_warnings_(true),
+ always_set_hidden_api_warning_flag_(false),
dump_native_stack_on_sig_quit_(true),
pruned_dalvik_cache_(false),
// Initially assume we perceive jank in case the process state is never updated.
@@ -1171,11 +1172,15 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) {
target_sdk_version_ = runtime_options.GetOrDefault(Opt::TargetSdkVersion);
- // Check whether to enforce hidden API access checks. Zygote needs to be exempt
- // but checks may be enabled for forked processes (see dalvik_system_ZygoteHooks).
- if (is_zygote_ || runtime_options.Exists(Opt::NoHiddenApiChecks)) {
- do_hidden_api_checks_ = false;
- }
+ // Check whether to enforce hidden API access checks. The checks are disabled
+ // by default and we only enable them if:
+ // (a) the runtime was started with a flag that enables the checks, or
+ // (b) Zygote forked a new process that is not exempt (see ZygoteHooks).
+ // TODO(dbrazdil): Turn the NoHiddenApiChecks negative flag into a positive one
+ // to clean up this logic.
+ do_hidden_api_checks_ = IsAotCompiler() && !runtime_options.Exists(Opt::NoHiddenApiChecks);
+ DCHECK(!is_zygote_ || !do_hidden_api_checks_)
+ << "Zygote should not be started with hidden API checks";
no_sig_chain_ = runtime_options.Exists(Opt::NoSigChain);
force_native_bridge_ = runtime_options.Exists(Opt::ForceNativeBridge);
diff --git a/runtime/runtime.h b/runtime/runtime.h
index 184e4e5b91..7ab9be5c5b 100644
--- a/runtime/runtime.h
+++ b/runtime/runtime.h
@@ -544,6 +544,14 @@ class Runtime {
return dedupe_hidden_api_warnings_;
}
+ void AlwaysSetHiddenApiWarningFlag() {
+ always_set_hidden_api_warning_flag_ = true;
+ }
+
+ bool ShouldAlwaysSetHiddenApiWarningFlag() const {
+ return always_set_hidden_api_warning_flag_;
+ }
+
bool IsDexFileFallbackEnabled() const {
return allow_dex_file_fallback_;
}
@@ -992,6 +1000,11 @@ class Runtime {
// This is only used for testing.
bool dedupe_hidden_api_warnings_;
+ // Hidden API can print warnings into the log and/or set a flag read by the
+ // framework to show a UI warning. If this field is true, the framework flag
+ // is always set whenever a warning is issued. This is only used for testing.
+ bool always_set_hidden_api_warning_flag_;
+
// Whether threads should dump their native stack on SIGQUIT.
bool dump_native_stack_on_sig_quit_;
diff --git a/runtime/vdex_file.cc b/runtime/vdex_file.cc
index 7428e98dbb..0829c5422e 100644
--- a/runtime/vdex_file.cc
+++ b/runtime/vdex_file.cc
@@ -228,8 +228,7 @@ QuickenInfoOffsetTableAccessor VdexFile::GetQuickenInfoOffsetTable(
const ArrayRef<const uint8_t>& quickening_info) const {
// The offset is in the preheader right before the dex file.
const uint32_t offset = GetQuickeningInfoTableOffset(source_dex_begin);
- const uint8_t* data_ptr = quickening_info.data() + offset;
- return QuickenInfoOffsetTableAccessor(data_ptr, num_method_ids);
+ return QuickenInfoOffsetTableAccessor(quickening_info.SubArray(offset), num_method_ids);
}
QuickenInfoOffsetTableAccessor VdexFile::GetQuickenInfoOffsetTable(
diff --git a/test/137-cfi/cfi.cc b/test/137-cfi/cfi.cc
index bdfb44a87e..83234f0382 100644
--- a/test/137-cfi/cfi.cc
+++ b/test/137-cfi/cfi.cc
@@ -83,7 +83,7 @@ static bool CheckStack(Backtrace* bt, const std::vector<std::string>& seq) {
printf("Cannot find %s in backtrace:\n", seq[cur_search_index].c_str());
for (Backtrace::const_iterator it = bt->begin(); it != bt->end(); ++it) {
if (BacktraceMap::IsValid(it->map)) {
- printf(" %s\n", it->func_name.c_str());
+ printf(" %s\n", Backtrace::FormatFrameData(&*it).c_str());
}
}
@@ -130,6 +130,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindInProcess(
"Java_Main_unwindInProcess", // This function.
"Main.unwindInProcess", // The corresponding Java native method frame.
"java.util.Arrays.binarySearch0", // Framework method.
+ "Base.runBase", // Method in other dex file.
"Main.main" // The Java entry method.
};
@@ -230,6 +231,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_unwindOtherProcess(
// resolved, so don't look for it right now.
"Main.sleep", // The corresponding Java native method frame.
"java.util.Arrays.binarySearch0", // Framework method.
+ "Base.runBase", // Method in other dex file.
"Main.main" // The Java entry method.
};
diff --git a/test/137-cfi/src-multidex/Base.java b/test/137-cfi/src-multidex/Base.java
new file mode 100644
index 0000000000..d3f8a5681d
--- /dev/null
+++ b/test/137-cfi/src-multidex/Base.java
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+public abstract class Base {
+ abstract public void runImpl();
+ public void runBase() {
+ runImpl();
+ }
+}
diff --git a/test/137-cfi/src/Main.java b/test/137-cfi/src/Main.java
index 1ec70722b5..9a2e352b8c 100644
--- a/test/137-cfi/src/Main.java
+++ b/test/137-cfi/src/Main.java
@@ -20,7 +20,7 @@ import java.io.InputStreamReader;
import java.util.Arrays;
import java.util.Comparator;
-public class Main implements Comparator<Main> {
+public class Main extends Base implements Comparator<Main> {
// Whether to test local unwinding.
private boolean testLocal;
@@ -57,10 +57,10 @@ public class Main implements Comparator<Main> {
}
public static void main(String[] args) throws Exception {
- new Main(args).run();
+ new Main(args).runBase();
}
- private void run() {
+ public void runImpl() {
if (secondary) {
if (!testRemote) {
throw new RuntimeException("Should not be running secondary!");
diff --git a/test/530-checker-lse/src/Main.java b/test/530-checker-lse/src/Main.java
index f6332b5503..98838c5089 100644
--- a/test/530-checker-lse/src/Main.java
+++ b/test/530-checker-lse/src/Main.java
@@ -398,7 +398,6 @@ public class Main {
/// CHECK-START: int Main.test15() load_store_elimination (after)
/// CHECK: <<Const2:i\d+>> IntConstant 2
/// CHECK: StaticFieldSet
- /// CHECK: StaticFieldSet
/// CHECK-NOT: StaticFieldGet
/// CHECK: Return [<<Const2>>]
@@ -773,6 +772,127 @@ public class Main {
return obj;
}
+ /// CHECK-START: void Main.testStoreStore2(TestClass2) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: void Main.testStoreStore2(TestClass2) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ private static void testStoreStore2(TestClass2 obj) {
+ obj.i = 41;
+ obj.j = 42;
+ obj.i = 43;
+ obj.j = 44;
+ }
+
+ /// CHECK-START: void Main.testStoreStore3(TestClass2, boolean) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: void Main.testStoreStore3(TestClass2, boolean) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldSet
+ /// CHECK-NOT: InstanceFieldSet
+
+ private static void testStoreStore3(TestClass2 obj, boolean flag) {
+ obj.i = 41;
+ obj.j = 42; // redundant since it's overwritten in both branches below.
+ if (flag) {
+ obj.j = 43;
+ } else {
+ obj.j = 44;
+ }
+ }
+
+ /// CHECK-START: void Main.testStoreStore4() load_store_elimination (before)
+ /// CHECK: StaticFieldSet
+ /// CHECK: StaticFieldSet
+
+ /// CHECK-START: void Main.testStoreStore4() load_store_elimination (after)
+ /// CHECK: StaticFieldSet
+ /// CHECK-NOT: StaticFieldSet
+
+ private static void testStoreStore4() {
+ TestClass.si = 61;
+ TestClass.si = 62;
+ }
+
+ /// CHECK-START: int Main.testStoreStore5(TestClass2, TestClass2) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: int Main.testStoreStore5(TestClass2, TestClass2) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+
+ private static int testStoreStore5(TestClass2 obj1, TestClass2 obj2) {
+ obj1.i = 71; // This store is needed since obj2.i may load from it.
+ int i = obj2.i;
+ obj1.i = 72;
+ return i;
+ }
+
+ /// CHECK-START: int Main.testStoreStore6(TestClass2, TestClass2) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+
+ /// CHECK-START: int Main.testStoreStore6(TestClass2, TestClass2) load_store_elimination (after)
+ /// CHECK-NOT: InstanceFieldSet
+ /// CHECK: InstanceFieldGet
+ /// CHECK: InstanceFieldSet
+
+ private static int testStoreStore6(TestClass2 obj1, TestClass2 obj2) {
+ obj1.i = 81; // This store is not needed since obj2.j cannot load from it.
+ int j = obj2.j;
+ obj1.i = 82;
+ return j;
+ }
+
+ /// CHECK-START: int Main.testNoSideEffects(int[]) load_store_elimination (before)
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK: ArrayGet
+
+ /// CHECK-START: int Main.testNoSideEffects(int[]) load_store_elimination (after)
+ /// CHECK: ArraySet
+ /// CHECK: ArraySet
+ /// CHECK-NOT: ArraySet
+ /// CHECK-NOT: ArrayGet
+
+ private static int testNoSideEffects(int[] array) {
+ array[0] = 101;
+ array[1] = 102;
+ int bitCount = Integer.bitCount(0x3456);
+ array[1] = 103;
+ return array[0] + bitCount;
+ }
+
+ /// CHECK-START: void Main.testThrow(TestClass2, java.lang.Exception) load_store_elimination (before)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: Throw
+
+ /// CHECK-START: void Main.testThrow(TestClass2, java.lang.Exception) load_store_elimination (after)
+ /// CHECK: InstanceFieldSet
+ /// CHECK: Throw
+
+ // Make sure throw keeps the store.
+ private static void testThrow(TestClass2 obj, Exception e) throws Exception {
+ obj.i = 55;
+ throw e;
+ }
+
/// CHECK-START: int Main.testStoreStoreWithDeoptimize(int[]) load_store_elimination (before)
/// CHECK: NewInstance
/// CHECK: InstanceFieldSet
@@ -814,23 +934,6 @@ public class Main {
return arr[0] + arr[1] + arr[2] + arr[3];
}
- /// CHECK-START: int Main.testNoSideEffects(int[]) load_store_elimination (before)
- /// CHECK: ArraySet
- /// CHECK: ArraySet
- /// CHECK: ArrayGet
-
- /// CHECK-START: int Main.testNoSideEffects(int[]) load_store_elimination (after)
- /// CHECK: ArraySet
- /// CHECK: ArraySet
- /// CHECK-NOT: ArrayGet
-
- private static int testNoSideEffects(int[] array) {
- array[0] = 101;
- int bitCount = Integer.bitCount(0x3456);
- array[1] = array[0] + 1;
- return array[0] + bitCount;
- }
-
/// CHECK-START: double Main.getCircleArea(double, boolean) load_store_elimination (before)
/// CHECK: NewInstance
@@ -1105,16 +1208,46 @@ public class Main {
assertIntEquals(testStoreStore().i, 41);
assertIntEquals(testStoreStore().j, 43);
- assertIntEquals(testStoreStoreWithDeoptimize(new int[4]), 4);
assertIntEquals(testExitMerge(true), 2);
assertIntEquals(testExitMerge2(true), 2);
assertIntEquals(testExitMerge2(false), 2);
- int ret = testNoSideEffects(iarray);
+ TestClass2 testclass2 = new TestClass2();
+ testStoreStore2(testclass2);
+ assertIntEquals(testclass2.i, 43);
+ assertIntEquals(testclass2.j, 44);
+
+ testStoreStore3(testclass2, true);
+ assertIntEquals(testclass2.i, 41);
+ assertIntEquals(testclass2.j, 43);
+ testStoreStore3(testclass2, false);
+ assertIntEquals(testclass2.i, 41);
+ assertIntEquals(testclass2.j, 44);
+
+ testStoreStore4();
+ assertIntEquals(TestClass.si, 62);
+
+ int ret = testStoreStore5(testclass2, testclass2);
+ assertIntEquals(testclass2.i, 72);
+ assertIntEquals(ret, 71);
+
+ testclass2.j = 88;
+ ret = testStoreStore6(testclass2, testclass2);
+ assertIntEquals(testclass2.i, 82);
+ assertIntEquals(ret, 88);
+
+ ret = testNoSideEffects(iarray);
assertIntEquals(iarray[0], 101);
- assertIntEquals(iarray[1], 102);
+ assertIntEquals(iarray[1], 103);
assertIntEquals(ret, 108);
+
+ try {
+ testThrow(testclass2, new Exception());
+ } catch (Exception e) {}
+ assertIntEquals(testclass2.i, 55);
+
+ assertIntEquals(testStoreStoreWithDeoptimize(new int[4]), 4);
}
static boolean sFlag;
diff --git a/test/530-regression-lse/expected.txt b/test/530-regression-lse/expected.txt
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/530-regression-lse/expected.txt
diff --git a/test/530-regression-lse/info.txt b/test/530-regression-lse/info.txt
new file mode 100644
index 0000000000..688d0c8d1b
--- /dev/null
+++ b/test/530-regression-lse/info.txt
@@ -0,0 +1,2 @@
+Regression test (b/72440777) for load store elimination across an invocation
+that has only write side effects.
diff --git a/test/530-regression-lse/src/Main.java b/test/530-regression-lse/src/Main.java
new file mode 100644
index 0000000000..7aec21c86a
--- /dev/null
+++ b/test/530-regression-lse/src/Main.java
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.io.File;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.MappedByteBuffer;
+import java.nio.channels.FileChannel;
+
+public class Main {
+ public static void assertEquals(int expected, int actual) {
+ if (expected != actual) {
+ throw new Error("Assertion failed: " + expected + " != " + actual);
+ }
+ }
+
+ private static void testRelativePositions(ByteBuffer b) throws Exception {
+ // This goes into Memory.pokeByte(), which is an intrinsic that has
+ // kWriteSideEffects. Stores before this call need to be kept.
+ b.put((byte) 0);
+ assertEquals(1, b.position());
+ }
+
+ private static ByteBuffer allocateMapped(int size) throws Exception {
+ File f = File.createTempFile("mapped", "tmp");
+ f.deleteOnExit();
+ RandomAccessFile raf = new RandomAccessFile(f, "rw");
+ raf.setLength(size);
+ FileChannel ch = raf.getChannel();
+ MappedByteBuffer result = ch.map(FileChannel.MapMode.READ_WRITE, 0, size);
+ ch.close();
+ return result;
+ }
+
+ public static void testRelativePositionsMapped() throws Exception {
+ testRelativePositions(allocateMapped(10));
+ }
+
+ public static void main(String[] args) throws Exception {
+ testRelativePositionsMapped();
+ }
+}
diff --git a/test/603-checker-instanceof/src/Main.java b/test/603-checker-instanceof/src/Main.java
index 1487969c03..2c97bedbaa 100644
--- a/test/603-checker-instanceof/src/Main.java
+++ b/test/603-checker-instanceof/src/Main.java
@@ -59,7 +59,7 @@ public class Main {
/// CHECK: InstanceOf check_kind:exact_check
/// CHECK-NOT: {{.*gs:.*}}
- /// CHECK-START-{ARM,ARM64}: boolean Main.$noinline$instanceOfString(java.lang.Object) disassembly (after)
+ /// CHECK-START-{ARM,ARM64,MIPS,MIPS64}: boolean Main.$noinline$instanceOfString(java.lang.Object) disassembly (after)
/// CHECK: InstanceOf check_kind:exact_check
// For ARM and ARM64, the marking register (r8 and x20, respectively) can be used in
// non-CC configs for any other purpose, so we'd need a config-specific checker test.
diff --git a/test/608-checker-unresolved-lse/src/Main.java b/test/608-checker-unresolved-lse/src/Main.java
index c6f8854b49..a39dd51bdf 100644
--- a/test/608-checker-unresolved-lse/src/Main.java
+++ b/test/608-checker-unresolved-lse/src/Main.java
@@ -88,7 +88,6 @@ public class Main extends MissingSuperClass {
/// CHECK-START: void Main.staticFieldTest() load_store_elimination (after)
/// CHECK: StaticFieldSet
- /// CHECK: StaticFieldSet
/// CHECK: UnresolvedStaticFieldGet
public static void staticFieldTest() {
// Ensure Foo is initialized.
diff --git a/test/639-checker-code-sinking/expected.txt b/test/639-checker-code-sinking/expected.txt
index 52e756c231..5d4833aca8 100644
--- a/test/639-checker-code-sinking/expected.txt
+++ b/test/639-checker-code-sinking/expected.txt
@@ -1,3 +1,3 @@
0
class java.lang.Object
-43
+42
diff --git a/test/639-checker-code-sinking/src/Main.java b/test/639-checker-code-sinking/src/Main.java
index 7496925adc..a1c30f7b4e 100644
--- a/test/639-checker-code-sinking/src/Main.java
+++ b/test/639-checker-code-sinking/src/Main.java
@@ -337,7 +337,7 @@ public class Main {
public static void testStoreStore(boolean doThrow) {
Main m = new Main();
m.intField = 42;
- m.intField = 43;
+ m.intField2 = 43;
if (doThrow) {
throw new Error(m.$opt$noinline$toString());
}
@@ -349,6 +349,7 @@ public class Main {
volatile int volatileField;
int intField;
+ int intField2;
Object objectField;
static boolean doThrow;
static boolean doLoop;
diff --git a/test/672-checker-throw-method/src/Main.java b/test/672-checker-throw-method/src/Main.java
index ceb5eb784c..a507133b91 100644
--- a/test/672-checker-throw-method/src/Main.java
+++ b/test/672-checker-throw-method/src/Main.java
@@ -37,6 +37,12 @@ public class Main {
doThrow(par);
}
+ static private void checkNotNullSplitAlt(Object obj, String par) {
+ if (obj != null)
+ return;
+ doThrow(par);
+ }
+
//
// Various ways of enforcing non-null parameter.
// In all cases, par should be subject to code sinking.
@@ -174,6 +180,60 @@ public class Main {
}
//
+ // Various ways of exploiting non-null parameter.
+ // In all cases, implicit null checks are redundant.
+ //
+
+ /// CHECK-START: int Main.deleteNullCheck(int[]) dead_code_elimination$after_inlining (before)
+ /// CHECK: <<Par:l\d+>> ParameterValue
+ /// CHECK: <<Zero:i\d+>> IntConstant 0
+ /// CHECK: <<Null:l\d+>> NullCheck [<<Par>>]
+ /// CHECK: <<Len:i\d+>> ArrayLength [<<Null>>]
+ /// CHECK: <<Check:i\d+>> BoundsCheck [<<Zero>>,<<Len>>]
+ /// CHECK: <<Get:i\d+>> ArrayGet [<<Null>>,<<Check>>]
+ /// CHECK: Return [<<Get>>]
+ //
+ /// CHECK-START: int Main.deleteNullCheck(int[]) dead_code_elimination$after_inlining (after)
+ /// CHECK: <<Par:l\d+>> ParameterValue
+ /// CHECK: <<Zero:i\d+>> IntConstant 0
+ /// CHECK: <<BT:l\d+>> BoundType [<<Par>>]
+ /// CHECK: <<Len:i\d+>> ArrayLength [<<BT>>]
+ /// CHECK: <<Check:i\d+>> BoundsCheck [<<Zero>>,<<Len>>]
+ /// CHECK: <<Get:i\d+>> ArrayGet [<<BT>>,<<Check>>]
+ /// CHECK: Return [<<Get>>]
+ //
+ /// CHECK-START: int Main.deleteNullCheck(int[]) dead_code_elimination$after_inlining (after)
+ /// CHECK-NOT: NullCheck
+ static public int deleteNullCheck(int[] a) {
+ checkNotNullSplit(a, "a");
+ return a[0];
+ }
+
+ /// CHECK-START: int Main.deleteNullCheckAlt(int[]) dead_code_elimination$after_inlining (before)
+ /// CHECK: NullCheck
+ //
+ /// CHECK-START: int Main.deleteNullCheckAlt(int[]) dead_code_elimination$after_inlining (after)
+ /// CHECK-NOT: NullCheck
+ static public int deleteNullCheckAlt(int[] a) {
+ checkNotNullSplitAlt(a, "a");
+ return a[0];
+ }
+
+ /// CHECK-START: int Main.deleteNullChecks3(int[], int[], int[]) dead_code_elimination$after_inlining (before)
+ /// CHECK: NullCheck
+ /// CHECK: NullCheck
+ /// CHECK: NullCheck
+ //
+ /// CHECK-START: int Main.deleteNullChecks3(int[], int[], int[]) dead_code_elimination$after_inlining (after)
+ /// CHECK-NOT: NullCheck
+ static public int deleteNullChecks3(int[] a, int[] b, int[] c) {
+ checkNotNullSplit(a, "a");
+ checkNotNullSplit(b, "b");
+ checkNotNullSplit(c, "c");
+ return a[0] + b[0] + c[0];
+ }
+
+ //
// Test driver.
//
@@ -233,6 +293,18 @@ public class Main {
expectEquals(5, a[i]);
}
+ int[] x = { 11 } ;
+ expectEquals(11, deleteNullCheck(x));
+ int[] y = { 55 } ;
+ int[] z = { 22 } ;
+ expectEquals(88, deleteNullChecks3(x, y, z));
+
+ try {
+ deleteNullCheck(null);
+ System.out.println("should not reach this!");
+ } catch (Error e) {
+ }
+
System.out.println("passed");
}
diff --git a/test/674-hiddenapi/hiddenapi.cc b/test/674-hiddenapi/hiddenapi.cc
index baff6f758d..effa37ade4 100644
--- a/test/674-hiddenapi/hiddenapi.cc
+++ b/test/674-hiddenapi/hiddenapi.cc
@@ -26,8 +26,10 @@ namespace art {
namespace Test674HiddenApi {
extern "C" JNIEXPORT void JNICALL Java_Main_init(JNIEnv*, jclass) {
- Runtime::Current()->SetHiddenApiChecksEnabled(true);
- Runtime::Current()->SetDedupeHiddenApiWarnings(false);
+ Runtime* runtime = Runtime::Current();
+ runtime->SetHiddenApiChecksEnabled(true);
+ runtime->SetDedupeHiddenApiWarnings(false);
+ runtime->AlwaysSetHiddenApiWarningFlag();
}
extern "C" JNIEXPORT void JNICALL Java_Main_appendToBootClassLoader(
diff --git a/test/710-varhandle-creation/src-art/Main.java b/test/710-varhandle-creation/src-art/Main.java
index a737b5ba9e..246aac6900 100644
--- a/test/710-varhandle-creation/src-art/Main.java
+++ b/test/710-varhandle-creation/src-art/Main.java
@@ -1,21 +1,17 @@
/*
- * Copyright 2017 Google Inc.
+ * Copyright (C) 2018 The Android Open Source Project
*
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation. Google designates this
- * particular file as subject to the "Classpath" exception as provided
- * by Google in the LICENSE file that accompanied this code.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
*
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
+ * http://www.apache.org/licenses/LICENSE-2.0
*
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
*/
import java.lang.invoke.MethodHandles;
diff --git a/test/714-invoke-custom-lambda-metafactory/build b/test/714-invoke-custom-lambda-metafactory/build
new file mode 100644
index 0000000000..b5002ba79c
--- /dev/null
+++ b/test/714-invoke-custom-lambda-metafactory/build
@@ -0,0 +1,22 @@
+#!/bin/bash
+#
+# Copyright 2018 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Make us exit on a failure
+set -e
+
+# Opt out of desugaring to ensure the offending lambda is in the DEX.
+export USE_DESUGAR=false
+./default-build "$@" --experimental method-handles
diff --git a/test/714-invoke-custom-lambda-metafactory/expected.txt b/test/714-invoke-custom-lambda-metafactory/expected.txt
new file mode 100644
index 0000000000..cbe98404c5
--- /dev/null
+++ b/test/714-invoke-custom-lambda-metafactory/expected.txt
@@ -0,0 +1,4 @@
+Exception in thread "main" java.lang.BootstrapMethodError: Exception from call site #0 bootstrap method
+ at Main.main(Main.java:25)
+Caused by: java.lang.NullPointerException: Bootstrap method returned null
+ ... 1 more
diff --git a/test/714-invoke-custom-lambda-metafactory/info.txt b/test/714-invoke-custom-lambda-metafactory/info.txt
new file mode 100644
index 0000000000..4ef117b7e3
--- /dev/null
+++ b/test/714-invoke-custom-lambda-metafactory/info.txt
@@ -0,0 +1 @@
+Checks that ART doesn't crash when it encounters LambdaMetafactory.
diff --git a/test/714-invoke-custom-lambda-metafactory/src/Main.java b/test/714-invoke-custom-lambda-metafactory/src/Main.java
new file mode 100644
index 0000000000..74e0ad4370
--- /dev/null
+++ b/test/714-invoke-custom-lambda-metafactory/src/Main.java
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2018 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+import java.util.Arrays;
+import java.util.List;
+import java.util.Optional;
+
+public class Main {
+ public static void main(String[] args) {
+ int requiredLength = 3;
+ List<String> list = Arrays.asList("A", "B", "C", "D", "EEE");
+ Optional<String> result = list.stream().filter(x -> x.length() >= requiredLength).findAny();
+ if (result.isPresent()) {
+ System.out.println("Result is " + result.get());
+ } else {
+ System.out.println("Result is not there.");
+ }
+ }
+}
diff --git a/test/956-methodhandles/src/Main.java b/test/956-methodhandles/src/Main.java
index cb06e4263e..1ddef03d62 100644
--- a/test/956-methodhandles/src/Main.java
+++ b/test/956-methodhandles/src/Main.java
@@ -22,6 +22,7 @@ import java.lang.invoke.MethodType;
import java.lang.invoke.WrongMethodTypeException;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
@@ -79,6 +80,7 @@ public class Main {
testVariableArity();
testVariableArity_MethodHandles_bind();
testRevealDirect();
+ testReflectiveCalls();
}
public static void testfindSpecial_invokeSuperBehaviour() throws Throwable {
@@ -1624,4 +1626,20 @@ public class Main {
assertEquals(field, info.reflectAs(Field.class, MethodHandles.lookup()));
assertEquals(MethodType.methodType(String.class), info.getMethodType());
}
+
+ public static void testReflectiveCalls() throws Throwable {
+ String[] methodNames = { "invoke", "invokeExact" };
+ for (String methodName : methodNames) {
+ Method invokeMethod = MethodHandle.class.getMethod(methodName, Object[].class);
+ MethodHandle instance =
+ MethodHandles.lookup().findVirtual(java.io.PrintStream.class, "println",
+ MethodType.methodType(void.class, String.class));
+ try {
+ invokeMethod.invoke(instance, new Object[] { new Object[] { Integer.valueOf(1) } } );
+ fail();
+ } catch (InvocationTargetException ite) {
+ assertEquals(ite.getCause().getClass(), UnsupportedOperationException.class);
+ }
+ }
+ }
}
diff --git a/test/983-source-transform-verify/source_transform.cc b/test/983-source-transform-verify/source_transform.cc
index c076d1521f..dfefce207b 100644
--- a/test/983-source-transform-verify/source_transform.cc
+++ b/test/983-source-transform-verify/source_transform.cc
@@ -67,6 +67,14 @@ void JNICALL CheckDexFileHook(jvmtiEnv* jvmti_env ATTRIBUTE_UNUSED,
if (IsJVM()) {
return;
}
+
+ // Due to b/72402467, class_data_len might just be an estimate.
+ CHECK_GE(static_cast<size_t>(class_data_len), sizeof(DexFile::Header));
+ const DexFile::Header* header = reinterpret_cast<const DexFile::Header*>(class_data);
+ uint32_t header_file_size = header->file_size_;
+ CHECK_LE(static_cast<jint>(header_file_size), class_data_len);
+ class_data_len = static_cast<jint>(header_file_size);
+
const ArtDexFileLoader dex_file_loader;
std::string error;
std::unique_ptr<const DexFile> dex(dex_file_loader.Open(class_data,
diff --git a/test/ManyMethods/ManyMethods.java b/test/ManyMethods/ManyMethods.java
index b3a57f6b3b..98b9faffcd 100644
--- a/test/ManyMethods/ManyMethods.java
+++ b/test/ManyMethods/ManyMethods.java
@@ -26,6 +26,8 @@ class ManyMethods {
public static String msg7 = "Hello World7";
public static String msg8 = "Hello World8";
public static String msg9 = "Hello World9";
+ public static String msg10 = "Hello World10";
+ public static String msg11 = "Hello World11";
}
static class Printer {
@@ -57,35 +59,35 @@ class ManyMethods {
}
public static void Print4() {
- Printer.Print(Strings.msg2);
+ Printer.Print(Strings.msg4);
}
public static void Print5() {
- Printer.Print(Strings.msg3);
+ Printer.Print(Strings.msg5);
}
public static void Print6() {
- Printer2.Print(Strings.msg4);
+ Printer2.Print(Strings.msg6);
}
public static void Print7() {
- Printer.Print(Strings.msg5);
+ Printer.Print(Strings.msg7);
}
public static void Print8() {
- Printer.Print(Strings.msg6);
+ Printer.Print(Strings.msg8);
}
public static void Print9() {
- Printer2.Print(Strings.msg7);
+ Printer2.Print(Strings.msg9);
}
public static void Print10() {
- Printer2.Print(Strings.msg8);
+ Printer2.Print(Strings.msg10);
}
public static void Print11() {
- Printer.Print(Strings.msg9);
+ Printer.Print(Strings.msg11);
}
public static void main(String args[]) {
diff --git a/test/common/runtime_state.cc b/test/common/runtime_state.cc
index 298a2f0033..2203bdca01 100644
--- a/test/common/runtime_state.cc
+++ b/test/common/runtime_state.cc
@@ -152,7 +152,14 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isAotCompiled(JNIEnv* env,
CHECK(chars.c_str() != nullptr);
ArtMethod* method = soa.Decode<mirror::Class>(cls)->FindDeclaredDirectMethodByName(
chars.c_str(), kRuntimePointerSize);
- return method->GetOatMethodQuickCode(kRuntimePointerSize) != nullptr;
+ const void* oat_code = method->GetOatMethodQuickCode(kRuntimePointerSize);
+ if (oat_code == nullptr) {
+ return false;
+ }
+ const void* actual_code = method->GetEntryPointFromQuickCompiledCodePtrSize(kRuntimePointerSize);
+ bool interpreter =
+ Runtime::Current()->GetClassLinker()->ShouldUseInterpreterEntrypoint(method, actual_code);
+ return !interpreter;
}
extern "C" JNIEXPORT jboolean JNICALL Java_Main_hasJitCompiledEntrypoint(JNIEnv* env,
diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar
index ea2d464d24..02438701b8 100755
--- a/test/etc/run-test-jar
+++ b/test/etc/run-test-jar
@@ -849,7 +849,7 @@ if [ "$HOST" = "n" ]; then
fi
# System libraries needed by libarttestd.so
- PUBLIC_LIBS=libart.so:libartd.so:libc++.so:libbacktrace.so:libbase.so:libnativehelper.so
+ PUBLIC_LIBS=libart.so:libartd.so:libc++.so:libbacktrace.so:libdexfile.so:libbase.so:libnativehelper.so
# Create a script with the command. The command can get longer than the longest
# allowed adb command and there is no way to get the exit status from a adb shell
diff --git a/test/knownfailures.json b/test/knownfailures.json
index 83ac068109..2b28409a1f 100644
--- a/test/knownfailures.json
+++ b/test/knownfailures.json
@@ -384,6 +384,12 @@
"variant": "jvmti-stress & jit | redefine-stress & jit"
},
{
+ "test_patterns": ["674-hiddenapi"],
+ "description": ["hiddenapi test is failing with redefine stress cdex"],
+ "bug": "http://b/72610009",
+ "variant": "redefine-stress & cdex-fast"
+ },
+ {
"test_patterns": ["616-cha"],
"description": ["The test assumes a boot image exists."],
"bug": "http://b/34193647",
@@ -425,6 +431,7 @@
},
{
"tests": [
+ "714-invoke-custom-lambda-metafactory",
"950-redefine-intrinsic",
"951-threaded-obsolete",
"952-invoke-custom",
@@ -648,11 +655,6 @@
"description": ["Tests that depend on input-vdex are not supported with compact dex"]
},
{
- "tests": ["137-cfi"],
- "variant": "cdex-fast",
- "description": ["Temporarily disabled until libbacktrace works properly with shared data section"]
- },
- {
"tests": "661-oat-writer-layout",
"variant": "interp-ac | interpreter | jit | no-dex2oat | no-prebuild | no-image | trace | redefine-stress | jvmti-stress",
"description": ["Test is designed to only check --compiler-filter=speed"]
diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py
index 414e3e5037..3064c76ffd 100644
--- a/test/testrunner/target_config.py
+++ b/test/testrunner/target_config.py
@@ -327,27 +327,20 @@ target_config = {
'ART_USE_READ_BARRIER' : 'false'
}
},
- 'art-gtest-heap-poisoning': {
- 'make' : 'valgrind-test-art-host64',
- 'env' : {
- 'ART_HEAP_POISONING' : 'true',
- 'ART_USE_READ_BARRIER' : 'false'
- }
- },
# ASAN (host) configurations.
# These configurations need detect_leaks=0 to work in non-setup environments like build bots,
# as our build tools leak. b/37751350
- 'art-gtest-asan': {
+ 'art-gtest-asan': {
'make' : 'test-art-host-gtest',
'env': {
'SANITIZE_HOST' : 'address',
'ASAN_OPTIONS' : 'detect_leaks=0'
}
- },
- 'art-asan': {
+ },
+ 'art-asan': {
'run-test' : ['--interpreter',
'--optimizing',
'--jit'],
@@ -355,7 +348,16 @@ target_config = {
'SANITIZE_HOST' : 'address',
'ASAN_OPTIONS' : 'detect_leaks=0'
}
- },
+ },
+ 'art-gtest-heap-poisoning': {
+ 'make' : 'test-art-host-gtest',
+ 'env' : {
+ 'ART_HEAP_POISONING' : 'true',
+ 'ART_USE_READ_BARRIER' : 'false',
+ 'SANITIZE_HOST' : 'address',
+ 'ASAN_OPTIONS' : 'detect_leaks=0'
+ }
+ },
# ART Golem build targets used by go/lem (continuous ART benchmarking),
# (art-opt-cc is used by default since it mimics the default preopt config),
diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py
index 8a1e06cabf..3d173f5571 100755
--- a/test/testrunner/testrunner.py
+++ b/test/testrunner/testrunner.py
@@ -357,11 +357,12 @@ def run_tests(tests):
while threading.active_count() > 2:
time.sleep(0.1)
return
+ # NB: The order of the components assembled here must match the order
+ # in which the regex parser in parse_test_name expects them.
test_name = 'test-art-'
test_name += target + '-run-test-'
test_name += run + '-'
test_name += prebuild + '-'
- test_name += cdex_level + '-'
test_name += compiler + '-'
test_name += relocate + '-'
test_name += trace + '-'
@@ -371,6 +372,7 @@ def run_tests(tests):
test_name += pictest + '-'
test_name += debuggable + '-'
test_name += jvmti + '-'
+ test_name += cdex_level + '-'
test_name += test
test_name += address_size
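With this reordering the cdex component is assembled immediately before the test name instead of right after the prebuild component, so a full name now has the illustrative shape test-art-<target>-run-test-<run>-<prebuild>-<compiler>-<relocate>-<trace>-…-<debuggable>-<jvmti>-<cdex_level>-<test><address_size>, matching the component order the regex in parse_test_name expects.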
diff --git a/test/ti-stress/stress.cc b/test/ti-stress/stress.cc
index 6e29e36e82..7fc289faeb 100644
--- a/test/ti-stress/stress.cc
+++ b/test/ti-stress/stress.cc
@@ -809,7 +809,7 @@ extern "C" JNIEXPORT jint JNICALL Agent_OnLoad(JavaVM* vm,
.can_signal_thread = 0,
.can_get_source_file_name = 1,
.can_get_line_numbers = 1,
- .can_get_source_debug_extension = 0,
+ .can_get_source_debug_extension = 1,
.can_access_local_variables = 0,
.can_maintain_original_method_order = 0,
.can_generate_single_step_events = 1,
diff --git a/test/valgrind-suppressions.txt b/test/valgrind-suppressions.txt
index 086a856f51..a97d03c2d4 100644
--- a/test/valgrind-suppressions.txt
+++ b/test/valgrind-suppressions.txt
@@ -75,3 +75,13 @@
process_vm_readv(lvec[...])
fun:process_vm_readv
}
+
+# Suppression for the msync-based IsAddressMapped check in MemMapTest
+{
+ MemMapTest_IsAddressMapped
+ Memcheck:Param
+ msync(start)
+ ...
+ fun:_ZN3art10MemMapTest15IsAddressMappedEPv
+ ...
+}
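The suppressed Memcheck:Param report comes from the way the test probes arbitrary addresses: msync sets errno to ENOMEM when asked about an unmapped range, which makes it a cheap way to ask the kernel whether a page exists without touching it, but Valgrind flags the call because the pointer need not point into an allocation Memcheck tracks. A hedged sketch of that probing technique (the real helper lives in MemMapTest; this standalone version is for illustration only, assuming Linux msync semantics):

#include <cerrno>
#include <cstdint>
#include <sys/mman.h>
#include <unistd.h>

// Returns true if the page containing addr is currently mapped.
// msync requires a page-aligned address and fails with ENOMEM for
// unmapped ranges; any other outcome means the mapping exists.
static bool IsAddressMapped(void* addr) {
  const uintptr_t page_size = static_cast<uintptr_t>(sysconf(_SC_PAGE_SIZE));
  void* page_begin = reinterpret_cast<void*>(
      reinterpret_cast<uintptr_t>(addr) & ~(page_size - 1));
  return msync(page_begin, page_size, MS_ASYNC) == 0 || errno != ENOMEM;
}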
diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh
index 8956e98ed0..a20175531d 100755
--- a/tools/buildbot-build.sh
+++ b/tools/buildbot-build.sh
@@ -74,14 +74,7 @@ if [[ $mode == "host" ]]; then
make_command+=" dx-tests"
mode_suffix="-host"
elif [[ $mode == "target" ]]; then
- # Create dummy hidden API lists which are normally generated by the framework
- # but which we do not have in the buildbot manifest. These are empty because
- # we do not want to enforce these rules in the buildbots anyway.
- hiddenapi_out_dir=${out_dir}/target/common/obj/PACKAGING
- make_command="mkdir -p ${hiddenapi_out_dir} && "
- make_command+="touch ${hiddenapi_out_dir}/hiddenapi-{blacklist,dark-greylist,light-greylist}.txt && "
-
- make_command+="make $j_arg $extra_args $showcommands build-art-target-tests $common_targets"
+ make_command="make $j_arg $extra_args $showcommands build-art-target-tests $common_targets"
make_command+=" libjavacrypto-target libnetd_client-target linker toybox toolbox sh"
make_command+=" ${out_dir}/host/linux-x86/bin/adb libstdc++ "
make_command+=" ${out_dir}/target/product/${TARGET_PRODUCT}/system/etc/public.libraries.txt"
diff --git a/tools/external_oj_libjdwp_art_failures.txt b/tools/external_oj_libjdwp_art_failures.txt
index c96830a592..828c0aac0f 100644
--- a/tools/external_oj_libjdwp_art_failures.txt
+++ b/tools/external_oj_libjdwp_art_failures.txt
@@ -59,6 +59,13 @@
name: "org.apache.harmony.jpda.tests.jdwp.ObjectReference.IsCollectedTest#testIsCollected001"
},
{
+ description: "Test is flaky",
+ result: EXEC_FAILED,
+ bug: 70958370,
+ names: [ "org.apache.harmony.jpda.tests.jdwp.ObjectReference.EnableCollectionTest#testEnableCollection001",
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.EnableCollectionTest#testEnableCollection001" ]
+},
+{
description: "Test crashes",
result: EXEC_FAILED,
bug: 69591477,
diff --git a/tools/prebuilt_libjdwp_art_failures.txt b/tools/prebuilt_libjdwp_art_failures.txt
index eaa99a3bf8..a9d268de0a 100644
--- a/tools/prebuilt_libjdwp_art_failures.txt
+++ b/tools/prebuilt_libjdwp_art_failures.txt
@@ -114,16 +114,16 @@
name: "org.apache.harmony.jpda.tests.jdwp.DDM.DDMTest#testChunk001"
},
{
- description: "Test crashes",
- result: EXEC_FAILED,
- bug: 69591477,
- name: "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.ExitTest#testExit001"
-},
-{
description: "Test is flakey",
result: EXEC_FAILED,
bug: 70958370,
names: [ "org.apache.harmony.jpda.tests.jdwp.ObjectReference.EnableCollectionTest#testEnableCollection001",
- "org.apache.harmony.jpda.tests.jdwp.MultiSession.EnableCollectionTest.testEnableCollection001" ]
+ "org.apache.harmony.jpda.tests.jdwp.MultiSession.EnableCollectionTest#testEnableCollection001" ]
+},
+{
+ description: "Test crashes",
+ result: EXEC_FAILED,
+ bug: 69591477,
+ name: "org.apache.harmony.jpda.tests.jdwp.VirtualMachine.ExitTest#testExit001"
}
]