diff options
89 files changed, 1719 insertions, 2318 deletions
diff --git a/Android.mk b/Android.mk index 4d9f6225db..2c75accc9d 100644 --- a/Android.mk +++ b/Android.mk @@ -92,7 +92,6 @@ include $(art_path)/dex2oat/Android.mk include $(art_path)/disassembler/Android.mk include $(art_path)/oatdump/Android.mk include $(art_path)/dalvikvm/Android.mk -include $(art_path)/jdwpspy/Android.mk include $(art_build_path)/Android.oat.mk # ART_HOST_DEPENDENCIES depends on Android.executable.mk above for ART_HOST_EXECUTABLES @@ -356,19 +355,19 @@ include $(art_build_path)/Android.cpplint.mk .PHONY: use-art use-art: adb root && sleep 3 - adb shell setprop persist.sys.dalvik.vm.lib libart.so + adb shell setprop persist.sys.dalvik.vm.lib.1 libart.so adb reboot .PHONY: use-artd use-artd: adb root && sleep 3 - adb shell setprop persist.sys.dalvik.vm.lib libartd.so + adb shell setprop persist.sys.dalvik.vm.lib.1 libartd.so adb reboot .PHONY: use-dalvik use-dalvik: adb root && sleep 3 - adb shell setprop persist.sys.dalvik.vm.lib libdvm.so + adb shell setprop persist.sys.dalvik.vm.lib.1 libdvm.so adb reboot ######################################################################## diff --git a/build/Android.common.mk b/build/Android.common.mk index 0f4ade39f1..415d810e80 100644 --- a/build/Android.common.mk +++ b/build/Android.common.mk @@ -182,9 +182,6 @@ ART_HOST_CFLAGS += -Wthread-safety # Make host builds easier to debug and profile by not omitting the frame pointer. ART_HOST_CFLAGS += -fno-omit-frame-pointer -# Workaround differences in inttypes.h. 
-ART_HOST_CFLAGS += -D__STDC_FORMAT_MACROS=1 - # To use oprofile_android --callgraph, uncomment this and recompile with "mmm art -B -j16" # ART_TARGET_CFLAGS += -fno-omit-frame-pointer -marm -mapcs diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index bed48ba999..acaa0f8add 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -30,6 +30,7 @@ TEST_COMMON_SRC_FILES := \ compiler/utils/x86/managed_register_x86_test.cc \ runtime/barrier_test.cc \ runtime/base/bit_vector_test.cc \ + runtime/base/hex_dump_test.cc \ runtime/base/histogram_test.cc \ runtime/base/mutex_test.cc \ runtime/base/timing_logger_test.cc \ diff --git a/build/Android.oat.mk b/build/Android.oat.mk index ec6efbc599..10dc2d3fd2 100644 --- a/build/Android.oat.mk +++ b/build/Android.oat.mk @@ -68,7 +68,7 @@ LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.oat.mk LOCAL_ADDITIONAL_DEPENDENCIES += $(HOST_CORE_IMG_OUT) include $(BUILD_PHONY_PACKAGE) -endif +endif # ART_BUILD_HOST # If we aren't building the host toolchain, skip building the target core.art. 
ifeq ($(WITH_HOST_DALVIK),true) @@ -80,15 +80,5 @@ LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.oat.mk LOCAL_ADDITIONAL_DEPENDENCIES += $(TARGET_CORE_IMG_OUT) include $(BUILD_PHONY_PACKAGE) -endif - -ifeq ($(ART_BUILD_TARGET_NDEBUG),true) -include $(CLEAR_VARS) -LOCAL_MODULE := boot.art -LOCAL_MODULE_TAGS := optional -LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk -LOCAL_ADDITIONAL_DEPENDENCIES += art/build/Android.oat.mk -LOCAL_ADDITIONAL_DEPENDENCIES += $(DEFAULT_DEX_PREOPT_INSTALLED_IMAGE) -include $(BUILD_PHONY_PACKAGE) -endif -endif +endif # ART_BUILD_TARGET +endif # WITH_HOST_DALVIK diff --git a/compiler/Android.mk b/compiler/Android.mk index 25dfb0a192..c6662c2181 100644 --- a/compiler/Android.mk +++ b/compiler/Android.mk @@ -59,7 +59,8 @@ LIBART_COMPILER_SRC_FILES := \ dex/frontend.cc \ dex/mir_graph.cc \ dex/mir_analysis.cc \ - dex/verified_methods_data.cc \ + dex/verified_method.cc \ + dex/verification_results.cc \ dex/vreg_analysis.cc \ dex/ssa_transformation.cc \ driver/compiler_driver.cc \ diff --git a/compiler/dex/bb_optimizations.cc b/compiler/dex/bb_optimizations.cc index f0130674b5..b6716e64da 100644 --- a/compiler/dex/bb_optimizations.cc +++ b/compiler/dex/bb_optimizations.cc @@ -23,7 +23,7 @@ namespace art { /* * Code Layout pass implementation start. */ -bool CodeLayout::WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) const { +bool CodeLayout::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const { cUnit->mir_graph->LayoutBlocks(bb); // No need of repeating, so just return false. return false; @@ -32,13 +32,13 @@ bool CodeLayout::WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) const { /* * SSATransformation pass implementation start. 
*/ -bool SSATransformation::WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) const { +bool SSATransformation::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const { cUnit->mir_graph->InsertPhiNodeOperands(bb); // No need of repeating, so just return false. return false; } -void SSATransformation::End(CompilationUnit *cUnit) const { +void SSATransformation::End(CompilationUnit* cUnit) const { // Verify the dataflow information after the pass. if (cUnit->enable_debug & (1 << kDebugVerifyDataflow)) { cUnit->mir_graph->VerifyDataflow(); @@ -48,7 +48,7 @@ void SSATransformation::End(CompilationUnit *cUnit) const { /* * ConstantPropagation pass implementation start */ -bool ConstantPropagation::WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) const { +bool ConstantPropagation::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const { cUnit->mir_graph->DoConstantPropagation(bb); // No need of repeating, so just return false. return false; @@ -57,7 +57,7 @@ bool ConstantPropagation::WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb /* * MethodUseCount pass implementation start. */ -bool MethodUseCount::Gate(const CompilationUnit *cUnit) const { +bool MethodUseCount::Gate(const CompilationUnit* cUnit) const { // First initialize the data. cUnit->mir_graph->InitializeMethodUses(); @@ -67,7 +67,7 @@ bool MethodUseCount::Gate(const CompilationUnit *cUnit) const { return res; } -bool MethodUseCount::WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) const { +bool MethodUseCount::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const { cUnit->mir_graph->CountUses(bb); // No need of repeating, so just return false. return false; @@ -77,7 +77,7 @@ bool MethodUseCount::WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) con * Null Check Elimination and Type Inference Initialization pass implementation start. 
*/ -bool NullCheckEliminationAndTypeInferenceInit::Gate(const CompilationUnit *cUnit) const { +bool NullCheckEliminationAndTypeInferenceInit::Gate(const CompilationUnit* cUnit) const { // First check the ssa register vector cUnit->mir_graph->CheckSSARegisterVector(); @@ -87,7 +87,8 @@ bool NullCheckEliminationAndTypeInferenceInit::Gate(const CompilationUnit *cUnit return performInit; } -bool NullCheckEliminationAndTypeInferenceInit::WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) const { +bool NullCheckEliminationAndTypeInferenceInit::WalkBasicBlocks(CompilationUnit* cUnit, + BasicBlock* bb) const { cUnit->mir_graph->NullCheckEliminationInit(bb); // No need of repeating, so just return false. return false; @@ -96,7 +97,7 @@ bool NullCheckEliminationAndTypeInferenceInit::WalkBasicBlocks(CompilationUnit * /* * BasicBlock Combine pass implementation start. */ -bool BBCombine::WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) const { +bool BBCombine::WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const { cUnit->mir_graph->CombineBlocks(bb); // No need of repeating, so just return false. @@ -106,7 +107,7 @@ bool BBCombine::WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) const { /* * BasicBlock Optimization pass implementation start. 
*/ -void BBOptimizations::Start(CompilationUnit *cUnit) const { +void BBOptimizations::Start(CompilationUnit* cUnit) const { DCHECK_EQ(cUnit->num_compiler_temps, 0); /* diff --git a/compiler/dex/bb_optimizations.h b/compiler/dex/bb_optimizations.h index 768b273510..1286a8e52e 100644 --- a/compiler/dex/bb_optimizations.h +++ b/compiler/dex/bb_optimizations.h @@ -28,14 +28,14 @@ namespace art { */ class CodeLayout : public Pass { public: - CodeLayout():Pass("CodeLayout", "2_post_layout_cfg") { + CodeLayout() : Pass("CodeLayout", "2_post_layout_cfg") { } - void Start(CompilationUnit *cUnit) const { + void Start(CompilationUnit* cUnit) const { cUnit->mir_graph->VerifyDataflow(); } - bool WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) const; + bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const; }; /** @@ -44,16 +44,16 @@ class CodeLayout : public Pass { */ class SSATransformation : public Pass { public: - SSATransformation():Pass("SSATransformation", kPreOrderDFSTraversal, "3_post_ssa_cfg") { + SSATransformation() : Pass("SSATransformation", kPreOrderDFSTraversal, "3_post_ssa_cfg") { } - bool WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) const; + bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const; - void Start(CompilationUnit *cUnit) const { + void Start(CompilationUnit* cUnit) const { cUnit->mir_graph->InitializeSSATransformation(); } - void End(CompilationUnit *cUnit) const; + void End(CompilationUnit* cUnit) const; }; /** @@ -62,12 +62,12 @@ class SSATransformation : public Pass { */ class ConstantPropagation : public Pass { public: - ConstantPropagation():Pass("ConstantPropagation") { + ConstantPropagation() : Pass("ConstantPropagation") { } - bool WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) const; + bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const; - void Start(CompilationUnit *cUnit) const { + void Start(CompilationUnit* cUnit) const { 
cUnit->mir_graph->InitializeConstantPropagation(); } }; @@ -78,10 +78,10 @@ class ConstantPropagation : public Pass { */ class InitRegLocations : public Pass { public: - InitRegLocations():Pass("InitRegLocation") { + InitRegLocations() : Pass("InitRegLocation", kNoNodes) { } - void Start(CompilationUnit *cUnit) const { + void Start(CompilationUnit* cUnit) const { cUnit->mir_graph->InitRegLocations(); } }; @@ -92,12 +92,12 @@ class InitRegLocations : public Pass { */ class MethodUseCount : public Pass { public: - MethodUseCount():Pass("UseCount") { + MethodUseCount() : Pass("UseCount") { } - bool WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) const; + bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const; - bool Gate(const CompilationUnit *cUnit) const; + bool Gate(const CompilationUnit* cUnit) const; }; /** @@ -106,12 +106,12 @@ class MethodUseCount : public Pass { */ class NullCheckEliminationAndTypeInferenceInit : public Pass { public: - NullCheckEliminationAndTypeInferenceInit():Pass("NCE_TypeInferenceInit") { + NullCheckEliminationAndTypeInferenceInit() : Pass("NCE_TypeInferenceInit") { } - bool WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) const; + bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const; - bool Gate(const CompilationUnit *cUnit) const; + bool Gate(const CompilationUnit* cUnit) const; }; /** @@ -120,10 +120,11 @@ class NullCheckEliminationAndTypeInferenceInit : public Pass { */ class NullCheckEliminationAndTypeInference : public Pass { public: - NullCheckEliminationAndTypeInference():Pass("NCE_TypeInference", kRepeatingPreOrderDFSTraversal, "4_post_nce_cfg") { + NullCheckEliminationAndTypeInference() + : Pass("NCE_TypeInference", kRepeatingPreOrderDFSTraversal, "4_post_nce_cfg") { } - bool WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) const { + bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const { return cUnit->mir_graph->EliminateNullChecksAndInferTypes(bb); } }; @@ -134,14 
+135,14 @@ class NullCheckEliminationAndTypeInference : public Pass { */ class BBCombine : public Pass { public: - BBCombine():Pass("BBCombine", kPreOrderDFSTraversal, "5_post_bbcombine_cfg") { + BBCombine() : Pass("BBCombine", kPreOrderDFSTraversal, "5_post_bbcombine_cfg") { } - bool Gate(const CompilationUnit *cUnit) const { + bool Gate(const CompilationUnit* cUnit) const { return ((cUnit->disable_opt & (1 << kSuppressExceptionEdges)) != 0); } - bool WalkBasicBlocks(CompilationUnit *cUnit, BasicBlock *bb) const; + bool WalkBasicBlocks(CompilationUnit* cUnit, BasicBlock* bb) const; }; /** @@ -150,14 +151,14 @@ class BBCombine : public Pass { */ class BBOptimizations : public Pass { public: - BBOptimizations():Pass("BBOptimizations", "5_post_bbo_cfg") { + BBOptimizations() : Pass("BBOptimizations", kNoNodes, "5_post_bbo_cfg") { } - bool Gate(const CompilationUnit *cUnit) const { + bool Gate(const CompilationUnit* cUnit) const { return ((cUnit->disable_opt & (1 << kBBOpt)) == 0); } - void Start(CompilationUnit *cUnit) const; + void Start(CompilationUnit* cUnit) const; }; } // namespace art diff --git a/compiler/dex/dataflow_iterator-inl.h b/compiler/dex/dataflow_iterator-inl.h index 0ca1a47f79..f8b9c1a952 100644 --- a/compiler/dex/dataflow_iterator-inl.h +++ b/compiler/dex/dataflow_iterator-inl.h @@ -107,7 +107,7 @@ inline BasicBlock* AllNodesIterator::Next(bool had_change) { // Find the next BasicBlock. while (keep_looking == true) { // Get next BasicBlock. - res = all_nodes_iterator_->Next(); + res = all_nodes_iterator_.Next(); // Are we done or is the BasicBlock not hidden? 
if ((res == NULL) || (res->hidden == false)) { diff --git a/compiler/dex/dataflow_iterator.h b/compiler/dex/dataflow_iterator.h index 658a9b1c46..b45d6a45ae 100644 --- a/compiler/dex/dataflow_iterator.h +++ b/compiler/dex/dataflow_iterator.h @@ -138,21 +138,6 @@ namespace art { return ForwardSingleNext(); } - - /** - * @brief Redefine the new operator to use the arena - * @param size actually unused, we use our own class size - * @param arena the arena to perform the actual allocation - * @return the pointer to the newly allocated object - */ - static void* operator new(size_t size, ArenaAllocator* arena) { - return arena->Alloc(sizeof(PreOrderDfsIterator), ArenaAllocator::kAllocGrowableBitMap); - } - - /** - * @brief Redefine delete to not actually delete anything since we are using the arena - */ - static void operator delete(void* p) {} }; /** @@ -184,21 +169,6 @@ namespace art { return ForwardRepeatNext(); } - - /** - * @brief Redefine the new operator to use the arena - * @param size actually unused, we use our own class size - * @param arena the arena to perform the actual allocation - * @return the pointer to the newly allocated object - */ - static void* operator new(size_t size, ArenaAllocator* arena) { - return arena->Alloc(sizeof(RepeatingPreOrderDfsIterator), ArenaAllocator::kAllocGrowableBitMap); - } - - /** - * @brief Redefine delete to not actually delete anything since we are using the arena - */ - static void operator delete(void* p) {} }; /** @@ -230,21 +200,6 @@ namespace art { return ForwardRepeatNext(); } - - /** - * @brief Redefine the new operator to use the arena - * @param size actually unused, we use our own class size - * @param arena the arena to perform the actual allocation - * @return the pointer to the newly allocated object - */ - static void* operator new(size_t size, ArenaAllocator* arena) { - return arena->Alloc(sizeof(RepeatingPostOrderDfsIterator), ArenaAllocator::kAllocGrowableBitMap); - } - - /** - * @brief Redefine delete to 
not actually delete anything since we are using the arena - */ - static void operator delete(void* p) {} }; /** @@ -275,21 +230,6 @@ namespace art { return ReverseSingleNext(); } - - /** - * @brief Redefine the new operator to use the arena - * @param size actually unused, we use our own class size - * @param arena the arena to perform the actual allocation - * @return the pointer to the newly allocated object - */ - static void* operator new(size_t size, ArenaAllocator* arena) { - return arena->Alloc(sizeof(ReversePostOrderDfsIterator), ArenaAllocator::kAllocGrowableBitMap); - } - - /** - * @brief Redefine delete to not actually delete anything since we are using the arena - */ - static void operator delete(void* p) {} }; /** @@ -321,21 +261,6 @@ namespace art { return ReverseRepeatNext(); } - - /** - * @brief Redefine the new operator to use the arena - * @param size actually unused, we use our own class size - * @param arena the arena to perform the actual allocation - * @return the pointer to the newly allocated object - */ - static void* operator new(size_t size, ArenaAllocator* arena) { - return arena->Alloc(sizeof(RepeatingReversePostOrderDfsIterator), ArenaAllocator::kAllocGrowableBitMap); - } - - /** - * @brief Redefine delete to not actually delete anything since we are using the arena - */ - static void operator delete(void* p) {} }; /** @@ -366,21 +291,6 @@ namespace art { return ForwardSingleNext(); } - - /** - * @brief Redefine the new operator to use the arena - * @param size actually unused, we use our own class size - * @param arena the arena to perform the actual allocation - * @return the pointer to the newly allocated object - */ - static void* operator new(size_t size, ArenaAllocator* arena) { - return arena->Alloc(sizeof(PostOrderDOMIterator), ArenaAllocator::kAllocGrowableBitMap); - } - - /** - * @brief Redefine delete to not actually delete anything since we are using the arena - */ - static void operator delete(void* p) {} }; /** @@ -394,16 
+304,15 @@ namespace art { * @param mir_graph The MIRGraph considered. */ explicit AllNodesIterator(MIRGraph* mir_graph) - : DataflowIterator(mir_graph, 0, 0) { - all_nodes_iterator_ = new - (mir_graph->GetArena()) GrowableArray<BasicBlock*>::Iterator(mir_graph->GetBlockList()); + : DataflowIterator(mir_graph, 0, 0), + all_nodes_iterator_(mir_graph->GetBlockList()) { } /** * @brief Resetting the iterator. */ void Reset() { - all_nodes_iterator_->Reset(); + all_nodes_iterator_.Reset(); } /** @@ -413,23 +322,8 @@ namespace art { */ virtual BasicBlock* Next(bool had_change = false) ALWAYS_INLINE; - /** - * @brief Redefine the new operator to use the arena - * @param size actually unused, we use our own class size - * @param arena the arena to perform the actual allocation - * @return the pointer to the newly allocated object - */ - static void* operator new(size_t size, ArenaAllocator* arena) { - return arena->Alloc(sizeof(AllNodesIterator), ArenaAllocator::kAllocGrowableBitMap); - } - - /** - * @brief Redefine delete to not actually delete anything since we are using the arena - */ - static void operator delete(void* p) {} - private: - GrowableArray<BasicBlock*>::Iterator* all_nodes_iterator_; /**< @brief The list of all the nodes */ + GrowableArray<BasicBlock*>::Iterator all_nodes_iterator_; /**< @brief The list of all the nodes */ }; } // namespace art diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc index 3368132a0e..ff8fea0f88 100644 --- a/compiler/dex/dex_to_dex_compiler.cc +++ b/compiler/dex/dex_to_dex_compiler.cc @@ -176,8 +176,7 @@ Instruction* DexCompiler::CompileCheckCast(Instruction* inst, uint32_t dex_pc) { if (!kEnableCheckCastEllision || !PerformOptimizations()) { return inst; } - MethodReference referrer(&GetDexFile(), unit_.GetDexMethodIndex()); - if (!driver_.IsSafeCast(referrer, dex_pc)) { + if (!driver_.IsSafeCast(&unit_, dex_pc)) { return inst; } // Ok, this is a safe cast. 
Since the "check-cast" instruction size is 2 code @@ -272,15 +271,16 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst, } // namespace optimizer } // namespace art -extern "C" void ArtCompileDEX(art::CompilerDriver& compiler, const art::DexFile::CodeItem* code_item, +extern "C" void ArtCompileDEX(art::CompilerDriver& driver, const art::DexFile::CodeItem* code_item, uint32_t access_flags, art::InvokeType invoke_type, uint16_t class_def_idx, uint32_t method_idx, jobject class_loader, const art::DexFile& dex_file, art::DexToDexCompilationLevel dex_to_dex_compilation_level) { if (dex_to_dex_compilation_level != art::kDontDexToDexCompile) { art::DexCompilationUnit unit(NULL, class_loader, art::Runtime::Current()->GetClassLinker(), - dex_file, code_item, class_def_idx, method_idx, access_flags); - art::optimizer::DexCompiler dex_compiler(compiler, unit, dex_to_dex_compilation_level); + dex_file, code_item, class_def_idx, method_idx, access_flags, + driver.GetVerifiedMethod(&dex_file, method_idx)); + art::optimizer::DexCompiler dex_compiler(driver, unit, dex_to_dex_compilation_level); dex_compiler.Compile(); } } diff --git a/compiler/dex/growable_array.h b/compiler/dex/growable_array.h index 639120a2ba..b5842e1486 100644 --- a/compiler/dex/growable_array.h +++ b/compiler/dex/growable_array.h @@ -66,11 +66,6 @@ class GrowableArray { idx_ = 0; } - static void* operator new(size_t size, ArenaAllocator* arena) { - return arena->Alloc(sizeof(GrowableArray::Iterator), ArenaAllocator::kAllocGrowableArray); - }; - static void operator delete(void* p) {} // Nop. 
- private: size_t idx_; GrowableArray* const g_list_; diff --git a/compiler/dex/mir_graph.cc b/compiler/dex/mir_graph.cc index 8c90edb1f4..9dbb3417a3 100644 --- a/compiler/dex/mir_graph.cc +++ b/compiler/dex/mir_graph.cc @@ -529,7 +529,8 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_ current_offset_ = 0; // TODO: will need to snapshot stack image and use that as the mir context identification. m_units_.push_back(new DexCompilationUnit(cu_, class_loader, Runtime::Current()->GetClassLinker(), - dex_file, current_code_item_, class_def_idx, method_idx, access_flags)); + dex_file, current_code_item_, class_def_idx, method_idx, access_flags, + cu_->compiler_driver->GetVerifiedMethod(&dex_file, method_idx))); const uint16_t* code_ptr = current_code_item_->insns_; const uint16_t* code_end = current_code_item_->insns_ + current_code_item_->insns_size_in_code_units_; diff --git a/compiler/dex/pass.h b/compiler/dex/pass.h index c52ddf5f27..255892e324 100644 --- a/compiler/dex/pass.h +++ b/compiler/dex/pass.h @@ -41,6 +41,7 @@ enum DataFlowAnalysisMode { kRepeatingPostOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Post-Order. */ kRepeatingReversePostOrderDFSTraversal, /**< @brief Depth-First-Search / Repeating Reverse Post-Order. */ kPostOrderDOMTraversal, /**< @brief Dominator tree / Post-Order. */ + kNoNodes, /**< @brief Skip BasicBlock traversal. 
*/ }; /** @@ -50,20 +51,22 @@ enum DataFlowAnalysisMode { */ class Pass { public: - Pass(const char *name, DataFlowAnalysisMode type, bool freed, const unsigned int f, const char *dump): pass_name_(name), traversal_type_(type), flags_(f), dump_cfg_folder_(dump) { + explicit Pass(const char* name, DataFlowAnalysisMode type = kAllNodes, + unsigned int flags = 0u, const char* dump = "") + : pass_name_(name), traversal_type_(type), flags_(flags), dump_cfg_folder_(dump) { } - Pass(const char *name, const char *dump): pass_name_(name), traversal_type_(kAllNodes), flags_(0), dump_cfg_folder_(dump) { + Pass(const char* name, DataFlowAnalysisMode type, const char* dump) + : pass_name_(name), traversal_type_(type), flags_(0), dump_cfg_folder_(dump) { } - explicit Pass(const char *name):pass_name_(name), traversal_type_(kAllNodes), flags_(0), dump_cfg_folder_("") { + Pass(const char* name, const char* dump) + : pass_name_(name), traversal_type_(kAllNodes), flags_(0), dump_cfg_folder_(dump) { } - Pass(const char *name, DataFlowAnalysisMode type, const char *dump):pass_name_(name), traversal_type_(type), flags_(false), dump_cfg_folder_(dump) { + virtual ~Pass() { } - virtual ~Pass() {} - virtual const char* GetName() const { return pass_name_; } @@ -76,14 +79,16 @@ class Pass { return (flags_ & flag); } - const char* GetDumpCFGFolder() const {return dump_cfg_folder_;} + const char* GetDumpCFGFolder() const { + return dump_cfg_folder_; + } /** * @brief Gate for the pass: determines whether to execute the pass or not considering a CompilationUnit * @param c_unit the CompilationUnit. * @return whether or not to execute the pass */ - virtual bool Gate(const CompilationUnit *c_unit) const { + virtual bool Gate(const CompilationUnit* c_unit) const { // Unused parameter. UNUSED(c_unit); @@ -95,7 +100,7 @@ class Pass { * @brief Start of the pass: called before the WalkBasicBlocks function * @param c_unit the considered CompilationUnit. 
*/ - virtual void Start(CompilationUnit *c_unit) const { + virtual void Start(CompilationUnit* c_unit) const { // Unused parameter. UNUSED(c_unit); } @@ -104,7 +109,7 @@ class Pass { * @brief End of the pass: called after the WalkBasicBlocks function * @param c_unit the considered CompilationUnit. */ - virtual void End(CompilationUnit *c_unit) const { + virtual void End(CompilationUnit* c_unit) const { // Unused parameter. UNUSED(c_unit); } @@ -115,7 +120,7 @@ class Pass { * @param bb the BasicBlock. * @return whether or not there is a change when walking the BasicBlock */ - virtual bool WalkBasicBlocks(CompilationUnit *c_unit, BasicBlock *bb) const { + virtual bool WalkBasicBlocks(CompilationUnit* c_unit, BasicBlock* bb) const { // Unused parameters. UNUSED(c_unit); UNUSED(bb); diff --git a/compiler/dex/pass_driver.cc b/compiler/dex/pass_driver.cc index 820dc5a629..4f8739a666 100644 --- a/compiler/dex/pass_driver.cc +++ b/compiler/dex/pass_driver.cc @@ -16,6 +16,8 @@ #include <dlfcn.h> +#include "base/logging.h" +#include "base/macros.h" #include "bb_optimizations.h" #include "compiler_internals.h" #include "dataflow_iterator.h" @@ -28,7 +30,8 @@ namespace art { namespace { // anonymous namespace /** - * @brief Helper function to create a single instance of a given Pass and can be shared across the threads + * @brief Helper function to create a single instance of a given Pass and can be shared across + * the threads. */ template <typename PassType> const Pass* GetPassInstance() { @@ -36,55 +39,58 @@ const Pass* GetPassInstance() { return &pass; } +void DoWalkBasicBlocks(CompilationUnit* c_unit, const Pass* pass, DataflowIterator* iterator) { + // Paranoid: Check the iterator before walking the BasicBlocks. 
+ DCHECK(iterator != nullptr); + + bool change = false; + for (BasicBlock *bb = iterator->Next(change); bb != 0; bb = iterator->Next(change)) { + change = pass->WalkBasicBlocks(c_unit, bb); + } +} + +template <typename Iterator> +inline void DoWalkBasicBlocks(CompilationUnit* c_unit, const Pass* pass) { + Iterator iterator(c_unit->mir_graph.get()); + DoWalkBasicBlocks(c_unit, pass, &iterator); +} + } // anonymous namespace -PassDriver::PassDriver(CompilationUnit* cu, bool create_default_passes) : cu_(cu) { - dump_cfg_folder_ = "/sdcard/"; +PassDriver::PassDriver(CompilationUnit* cu, bool create_default_passes) + : cu_(cu), dump_cfg_folder_("/sdcard/") { + DCHECK(cu != nullptr); // If need be, create the default passes. - if (create_default_passes == true) { + if (create_default_passes) { CreatePasses(); } } PassDriver::~PassDriver() { - // Clear the map: done to remove any chance of having a pointer after freeing below - pass_map_.clear(); } -void PassDriver::InsertPass(const Pass* new_pass, bool warn_override) { - assert(new_pass != 0); - - // Get name here to not do it all over the method. - const std::string& name = new_pass->GetName(); +void PassDriver::InsertPass(const Pass* new_pass) { + DCHECK(new_pass != nullptr); + DCHECK(new_pass->GetName() != nullptr && new_pass->GetName()[0] != 0); - // Do we want to warn the user about squashing a pass? - if (warn_override == false) { - auto it = pass_map_.find(name); + // It is an error to override an existing pass. + DCHECK(GetPass(new_pass->GetName()) == nullptr) + << "Pass name " << new_pass->GetName() << " already used."; - if (it != pass_map_.end()) { - LOG(INFO) << "Pass name " << name << " already used, overwriting pass"; - } - } - - // Now add to map and list. - pass_map_.Put(name, new_pass); + // Now add to the list. 
pass_list_.push_back(new_pass); } void PassDriver::CreatePasses() { /* - * Create the pass list: - * - These passes are immutable and are shared across the threads: - * - This is achieved via: - * - The UniquePtr used here. - * - DISALLOW_COPY_AND_ASSIGN in the base Pass class. + * Create the pass list. These passes are immutable and are shared across the threads. * * Advantage is that there will be no race conditions here. * Disadvantage is the passes can't change their internal states depending on CompilationUnit: * - This is not yet an issue: no current pass would require it. */ - static const Pass* passes[] = { + static const Pass* const passes[] = { GetPassInstance<CodeLayout>(), GetPassInstance<SSATransformation>(), GetPassInstance<ConstantPropagation>(), @@ -96,14 +102,10 @@ void PassDriver::CreatePasses() { GetPassInstance<BBOptimizations>(), }; - // Get number of elements in the array. - unsigned int nbr = (sizeof(passes) / sizeof(passes[0])); - - // Insert each pass into the map and into the list via the InsertPass method: - // - Map is used for the lookup - // - List is used for the pass walk - for (unsigned int i = 0; i < nbr; i++) { - InsertPass(passes[i]); + // Insert each pass into the list via the InsertPass method. + pass_list_.reserve(arraysize(passes)); + for (const Pass* pass : passes) { + InsertPass(pass); } } @@ -114,49 +116,37 @@ void PassDriver::HandlePassFlag(CompilationUnit* c_unit, const Pass* pass) { } void PassDriver::DispatchPass(CompilationUnit* c_unit, const Pass* curPass) { - DataflowIterator* iterator = 0; - LOG(DEBUG) << "Dispatching " << curPass->GetName(); - MIRGraph* mir_graph = c_unit->mir_graph.get(); - ArenaAllocator *arena = &(c_unit->arena); - - // Let us start by getting the right iterator. 
DataFlowAnalysisMode mode = curPass->GetTraversal(); switch (mode) { case kPreOrderDFSTraversal: - iterator = new (arena) PreOrderDfsIterator(mir_graph); + DoWalkBasicBlocks<PreOrderDfsIterator>(c_unit, curPass); break; case kRepeatingPreOrderDFSTraversal: - iterator = new (arena) RepeatingPreOrderDfsIterator(mir_graph); + DoWalkBasicBlocks<RepeatingPreOrderDfsIterator>(c_unit, curPass); break; case kRepeatingPostOrderDFSTraversal: - iterator = new (arena) RepeatingPostOrderDfsIterator(mir_graph); + DoWalkBasicBlocks<RepeatingPostOrderDfsIterator>(c_unit, curPass); break; case kReversePostOrderDFSTraversal: - iterator = new (arena) ReversePostOrderDfsIterator(mir_graph); + DoWalkBasicBlocks<ReversePostOrderDfsIterator>(c_unit, curPass); break; case kRepeatingReversePostOrderDFSTraversal: - iterator = new (arena) RepeatingReversePostOrderDfsIterator(mir_graph); + DoWalkBasicBlocks<RepeatingReversePostOrderDfsIterator>(c_unit, curPass); break; case kPostOrderDOMTraversal: - iterator = new (arena) PostOrderDOMIterator(mir_graph); + DoWalkBasicBlocks<PostOrderDOMIterator>(c_unit, curPass); break; case kAllNodes: - iterator = new (arena) AllNodesIterator(mir_graph); + DoWalkBasicBlocks<AllNodesIterator>(c_unit, curPass); + break; + case kNoNodes: break; default: LOG(DEBUG) << "Iterator mode not handled in dispatcher: " << mode; - return; - } - - // Paranoid: Check the iterator before walking the BasicBlocks. - assert(iterator != 0); - - bool change = false; - for (BasicBlock *bb = iterator->Next(change); bb != 0; bb = iterator->Next(change)) { - change = curPass->WalkBasicBlocks(c_unit, bb); + break; } } @@ -166,33 +156,34 @@ void PassDriver::ApplyPass(CompilationUnit* c_unit, const Pass* curPass) { curPass->End(c_unit); } -bool PassDriver::RunPass(CompilationUnit* c_unit, const Pass* curPass, bool time_split) { - // Paranoid: c_unit or curPass cannot be 0, and the pass should have a name. 
- if (c_unit == 0 || curPass == 0 || (strcmp(curPass->GetName(), "") == 0)) { - return false; - } +bool PassDriver::RunPass(CompilationUnit* c_unit, const Pass* pass, bool time_split) { + // Paranoid: c_unit and pass cannot be nullptr, and the pass should have a name. + DCHECK(c_unit != nullptr); + DCHECK(pass != nullptr); + DCHECK(pass->GetName() != nullptr && pass->GetName()[0] != 0); // Do we perform a time split - if (time_split == true) { - c_unit->NewTimingSplit(curPass->GetName()); + if (time_split) { + c_unit->NewTimingSplit(pass->GetName()); } // Check the pass gate first. - bool shouldApplyPass = curPass->Gate(c_unit); + bool should_apply_pass = pass->Gate(c_unit); - if (shouldApplyPass == true) { + if (should_apply_pass) { // Applying the pass: first start, doWork, and end calls. - ApplyPass(c_unit, curPass); + ApplyPass(c_unit, pass); // Clean up if need be. - HandlePassFlag(c_unit, curPass); + HandlePassFlag(c_unit, pass); // Do we want to log it? if ((c_unit->enable_debug& (1 << kDebugDumpCFG)) != 0) { // Do we have a pass folder? - const std::string& passFolder = curPass->GetDumpCFGFolder(); + const char* passFolder = pass->GetDumpCFGFolder(); + DCHECK(passFolder != nullptr); - if (passFolder != "") { + if (passFolder[0] != 0) { // Create directory prefix. std::string prefix = GetDumpCFGFolder(); prefix += passFolder; @@ -204,19 +195,18 @@ bool PassDriver::RunPass(CompilationUnit* c_unit, const Pass* curPass, bool time } // If the pass gate passed, we can declare success. - return shouldApplyPass; + return should_apply_pass; } -bool PassDriver::RunPass(CompilationUnit* c_unit, const std::string& pass_name) { - // Paranoid: c_unit cannot be 0 and we need a pass name. - if (c_unit == 0 || pass_name == "") { - return false; - } +bool PassDriver::RunPass(CompilationUnit* c_unit, const char* pass_name) { + // Paranoid: c_unit cannot be nullptr and we need a pass name. 
+ DCHECK(c_unit != nullptr); + DCHECK(pass_name != nullptr && pass_name[0] != 0); - const Pass* curPass = GetPass(pass_name); + const Pass* cur_pass = GetPass(pass_name); - if (curPass != 0) { - return RunPass(c_unit, curPass); + if (cur_pass != nullptr) { + return RunPass(c_unit, cur_pass); } // Return false, we did not find the pass. @@ -224,27 +214,26 @@ bool PassDriver::RunPass(CompilationUnit* c_unit, const std::string& pass_name) } void PassDriver::Launch() { - for (const Pass *curPass : pass_list_) { - RunPass(cu_, curPass, true); + for (const Pass* cur_pass : pass_list_) { + RunPass(cu_, cur_pass, true); } } void PassDriver::PrintPassNames() const { LOG(INFO) << "Loop Passes are:"; - for (const Pass *curPass : pass_list_) { - LOG(INFO) << "\t-" << curPass->GetName(); + for (const Pass* cur_pass : pass_list_) { + LOG(INFO) << "\t-" << cur_pass->GetName(); } } -const Pass* PassDriver::GetPass(const std::string& name) const { - auto it = pass_map_.find(name); - - if (it != pass_map_.end()) { - return it->second; +const Pass* PassDriver::GetPass(const char* name) const { + for (const Pass* cur_pass : pass_list_) { + if (strcmp(name, cur_pass->GetName()) == 0) { + return cur_pass; + } } - - return 0; + return nullptr; } } // namespace art diff --git a/compiler/dex/pass_driver.h b/compiler/dex/pass_driver.h index d58046007f..c734d3e0eb 100644 --- a/compiler/dex/pass_driver.h +++ b/compiler/dex/pass_driver.h @@ -17,7 +17,7 @@ #ifndef ART_COMPILER_DEX_PASS_DRIVER_H_ #define ART_COMPILER_DEX_PASS_DRIVER_H_ -#include <list> +#include <vector> #include "pass.h" #include "safe_map.h" @@ -42,7 +42,7 @@ class PassDriver { * @param new_pass the new Pass to insert in the map and list. * @param warn_override warn if the name of the Pass is already used. */ - void InsertPass(const Pass* new_pass, bool warn_override = true); + void InsertPass(const Pass* new_pass); /** * @brief Run a pass using the name as key. 
@@ -50,7 +50,7 @@ class PassDriver { * @param pass_name the Pass name. * @return whether the pass was applied. */ - bool RunPass(CompilationUnit* c_unit, const std::string& pass_name); + bool RunPass(CompilationUnit* c_unit, const char* pass_name); /** * @brief Run a pass using the Pass itself. @@ -75,20 +75,17 @@ class PassDriver { void PrintPassNames() const; - const Pass* GetPass(const std::string& name) const; + const Pass* GetPass(const char* name) const; - const char *GetDumpCFGFolder() const { + const char* GetDumpCFGFolder() const { return dump_cfg_folder_; } protected: void CreatePasses(); - /** @brief The Pass Map: contains name -> pass for quick lookup. */ - SafeMap<std::string, const Pass*> pass_map_; - /** @brief List of passes: provides the order to execute the passes. */ - std::list<const Pass*> pass_list_; + std::vector<const Pass*> pass_list_; /** @brief The CompilationUnit on which to execute the passes on. */ CompilationUnit* const cu_; diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index d938af2c08..2d0fa03dd6 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -21,7 +21,8 @@ #include "mir_to_lir-inl.h" #include "dex/quick/dex_file_method_inliner.h" #include "dex/quick/dex_file_to_method_inliner_map.h" -#include "dex/verified_methods_data.h" +#include "dex/verification_results.h" +#include "dex/verified_method.h" #include "verifier/dex_gc_map.h" #include "verifier/method_verifier.h" @@ -763,10 +764,10 @@ void Mir2Lir::CreateNativeGcMap() { } } MethodReference method_ref(cu_->dex_file, cu_->method_idx); - const std::vector<uint8_t>* gc_map_raw = - cu_->compiler_driver->GetVerifiedMethodsData()->GetDexGcMap(method_ref); - verifier::DexPcToReferenceMap dex_gc_map(&(*gc_map_raw)[0]); - DCHECK_EQ(gc_map_raw->size(), dex_gc_map.RawSize()); + const std::vector<uint8_t>& gc_map_raw = + mir_graph_->GetCurrentDexCompilationUnit()->GetVerifiedMethod()->GetDexGcMap(); + 
verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]); + DCHECK_EQ(gc_map_raw.size(), dex_gc_map.RawSize()); // Compute native offset to references size. NativePcToReferenceMapBuilder native_gc_map_builder(&native_gc_map_, mapping_table.PcToDexSize(), diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc index 44d671deef..760e06e769 100644 --- a/compiler/dex/quick/gen_common.cc +++ b/compiler/dex/quick/gen_common.cc @@ -207,13 +207,43 @@ void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) { FlushAllRegs(); /* Everything to home location */ ThreadOffset func_offset(-1); - if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file, + const DexFile* dex_file = cu_->dex_file; + CompilerDriver* driver = cu_->compiler_driver; + if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *dex_file, type_idx)) { - func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArray); + bool is_type_initialized; // Ignored as an array does not have an initializer. + bool use_direct_type_ptr; + uintptr_t direct_type_ptr; + if (kEmbedClassInCode && + driver->CanEmbedTypeInCode(*dex_file, type_idx, + &is_type_initialized, &use_direct_type_ptr, &direct_type_ptr)) { + // The fast path. + if (!use_direct_type_ptr) { + // Use the literal pool and a PC-relative load from a data word. + LIR* data_target = ScanLiteralPool(class_literal_list_, type_idx, 0); + if (data_target == nullptr) { + data_target = AddWordData(&class_literal_list_, type_idx); + } + LIR* load_pc_rel = OpPcRelLoad(TargetReg(kArg0), data_target); + AppendLIR(load_pc_rel); + func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayResolved); + CallRuntimeHelperRegMethodRegLocation(func_offset, TargetReg(kArg0), rl_src, true); + } else { + // Use the direct pointer. 
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayResolved); + CallRuntimeHelperImmMethodRegLocation(func_offset, direct_type_ptr, rl_src, true); + } + } else { + // The slow path. + DCHECK_EQ(func_offset.Int32Value(), -1); + func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArray); + CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true); + } + DCHECK_NE(func_offset.Int32Value(), -1); } else { func_offset= QUICK_ENTRYPOINT_OFFSET(pAllocArrayWithAccessCheck); + CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true); } - CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true); RegLocation rl_result = GetReturn(false); StoreValue(rl_dest, rl_result); } @@ -962,6 +992,9 @@ void Mir2Lir::GenThrow(RegLocation rl_src) { // question with simple comparisons. void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest, RegLocation rl_src) { + // X86 has its own implementation. + DCHECK_NE(cu_->instruction_set, kX86); + RegLocation object = LoadValue(rl_src, kCoreReg); RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); int result_reg = rl_result.low_reg; @@ -1151,8 +1184,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_ // Note: currently type_known_final is unused, as optimizing will only improve the performance // of the exception throw path. DexCompilationUnit* cu = mir_graph_->GetCurrentDexCompilationUnit(); - const MethodReference mr(cu->GetDexFile(), cu->GetDexMethodIndex()); - if (!needs_access_check && cu_->compiler_driver->IsSafeCast(mr, insn_idx)) { + if (!needs_access_check && cu_->compiler_driver->IsSafeCast(cu, insn_idx)) { // Verifier type analysis proved this check cast would never cause an exception. 
return; } diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc index f865207fc7..3823fb31d4 100644 --- a/compiler/dex/quick/gen_invoke.cc +++ b/compiler/dex/quick/gen_invoke.cc @@ -153,6 +153,19 @@ void Mir2Lir::CallRuntimeHelperRegMethod(ThreadOffset helper_offset, int arg0, b CallHelper(r_tgt, helper_offset, safepoint_pc); } +void Mir2Lir::CallRuntimeHelperRegMethodRegLocation(ThreadOffset helper_offset, int arg0, + RegLocation arg2, bool safepoint_pc) { + int r_tgt = CallHelperSetup(helper_offset); + DCHECK_NE(TargetReg(kArg1), arg0); + if (TargetReg(kArg0) != arg0) { + OpRegCopy(TargetReg(kArg0), arg0); + } + LoadCurrMethodDirect(TargetReg(kArg1)); + LoadValueDirectFixed(arg2, TargetReg(kArg2)); + ClobberCallerSave(); + CallHelper(r_tgt, helper_offset, safepoint_pc); +} + void Mir2Lir::CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset, RegLocation arg0, RegLocation arg1, bool safepoint_pc) { int r_tgt = CallHelperSetup(helper_offset); @@ -1216,10 +1229,6 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long, // TODO - add Mips implementation return false; } - if (cu_->instruction_set == kX86 && is_object) { - // TODO: fix X86, it exhausts registers for card marking. - return false; - } // Unused - RegLocation rl_src_unsafe = info->args[0]; RegLocation rl_src_obj = info->args[1]; // Object RegLocation rl_src_offset = info->args[2]; // long low @@ -1239,6 +1248,9 @@ bool Mir2Lir::GenInlinedUnsafePut(CallInfo* info, bool is_long, rl_value = LoadValue(rl_src_value, kCoreReg); StoreBaseIndexed(rl_object.low_reg, rl_offset.low_reg, rl_value.low_reg, 0, kWord); } + + // Free up the temp early, to ensure x86 doesn't run out of temporaries in MarkGCCard. 
+ FreeTemp(rl_offset.low_reg); if (is_volatile) { GenMemBarrier(kStoreLoad); } diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc index 0a470a5b31..1f4122d7a3 100644 --- a/compiler/dex/quick/mir_to_lir.cc +++ b/compiler/dex/quick/mir_to_lir.cc @@ -342,8 +342,8 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list bool is_safe = is_null; // Always safe to store null. if (!is_safe) { // Check safety from verifier type information. - const MethodReference mr(cu_->dex_file, cu_->method_idx); - is_safe = cu_->compiler_driver->IsSafeCast(mr, mir->offset); + const DexCompilationUnit* unit = mir_graph_->GetCurrentDexCompilationUnit(); + is_safe = cu_->compiler_driver->IsSafeCast(unit, mir->offset); } if (is_null || is_safe) { // Store of constant null doesn't require an assignability test and can be generated inline diff --git a/compiler/dex/quick/mir_to_lir.h b/compiler/dex/quick/mir_to_lir.h index c67a129d0d..2ea84d4013 100644 --- a/compiler/dex/quick/mir_to_lir.h +++ b/compiler/dex/quick/mir_to_lir.h @@ -530,6 +530,8 @@ class Mir2Lir : public Backend { void CallRuntimeHelperImmMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc); void CallRuntimeHelperRegMethod(ThreadOffset helper_offset, int arg0, bool safepoint_pc); + void CallRuntimeHelperRegMethodRegLocation(ThreadOffset helper_offset, int arg0, + RegLocation arg2, bool safepoint_pc); void CallRuntimeHelperRegLocationRegLocation(ThreadOffset helper_offset, RegLocation arg0, RegLocation arg1, bool safepoint_pc); @@ -930,9 +932,10 @@ class Mir2Lir : public Backend { */ RegLocation ForceTempWide(RegLocation loc); + virtual void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, + RegLocation rl_dest, RegLocation rl_src); + private: - void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest, - RegLocation rl_src); void GenInstanceofCallingHelper(bool needs_access_check, bool type_known_final, bool 
type_known_abstract, bool use_declaring_class, bool can_assume_type_is_in_dex_cache, diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h index b46c32487f..f054d82b0d 100644 --- a/compiler/dex/quick/x86/codegen_x86.h +++ b/compiler/dex/quick/x86/codegen_x86.h @@ -180,6 +180,15 @@ class X86Mir2Lir : public Mir2Lir { */ void GenLongRegOrMemOp(RegLocation rl_dest, RegLocation rl_src, Instruction::Code op); + /** + * @brief Implement instanceof a final class with x86 specific code. + * @param use_declaring_class 'true' if we can use the class itself. + * @param type_idx Type index to use if use_declaring_class is 'false'. + * @param rl_dest Result to be set to 0 or 1. + * @param rl_src Object to be tested. + */ + void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, + RegLocation rl_dest, RegLocation rl_src); // Single operation generators. LIR* OpUnconditionalBranch(LIR* target); LIR* OpCmpBranch(ConditionCode cond, int src1, int src2, LIR* target); diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc index e665f700ba..7d0ba279d5 100644 --- a/compiler/dex/quick/x86/int_x86.cc +++ b/compiler/dex/quick/x86/int_x86.cc @@ -1650,4 +1650,72 @@ void X86Mir2Lir::GenLongLongImm(RegLocation rl_dest, RegLocation rl_src1, StoreFinalValueWide(rl_dest, rl_result); } +// For final classes there are no sub-classes to check and so we can answer the instance-of +// question with simple comparisons. Use compares to memory and SETEQ to optimize for x86. +void X86Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, + RegLocation rl_dest, RegLocation rl_src) { + RegLocation object = LoadValue(rl_src, kCoreReg); + RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true); + int result_reg = rl_result.low_reg; + + // SETcc only works with EAX..EDX. 
+ if (result_reg == object.low_reg || result_reg >= 4) { + result_reg = AllocTypedTemp(false, kCoreReg); + DCHECK_LT(result_reg, 4); + } + + // Assume that there is no match. + LoadConstant(result_reg, 0); + LIR* null_branchover = OpCmpImmBranch(kCondEq, object.low_reg, 0, NULL); + + int check_class = AllocTypedTemp(false, kCoreReg); + + // If Method* is already in a register, we can save a copy. + RegLocation rl_method = mir_graph_->GetMethodLoc(); + int32_t offset_of_type = mirror::Array::DataOffset(sizeof(mirror::Class*)).Int32Value() + + (sizeof(mirror::Class*) * type_idx); + + if (rl_method.location == kLocPhysReg) { + if (use_declaring_class) { + LoadWordDisp(rl_method.low_reg, + mirror::ArtMethod::DeclaringClassOffset().Int32Value(), + check_class); + } else { + LoadWordDisp(rl_method.low_reg, + mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), + check_class); + LoadWordDisp(check_class, offset_of_type, check_class); + } + } else { + LoadCurrMethodDirect(check_class); + if (use_declaring_class) { + LoadWordDisp(check_class, + mirror::ArtMethod::DeclaringClassOffset().Int32Value(), + check_class); + } else { + LoadWordDisp(check_class, + mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(), + check_class); + LoadWordDisp(check_class, offset_of_type, check_class); + } + } + + // Compare the computed class to the class in the object. + DCHECK_EQ(object.location, kLocPhysReg); + OpRegMem(kOpCmp, check_class, object.low_reg, + mirror::Object::ClassOffset().Int32Value()); + + // Set the low byte of the result to 0 or 1 from the compare condition code. 
+ NewLIR2(kX86Set8R, result_reg, kX86CondEq); + + LIR* target = NewLIR0(kPseudoTargetLabel); + null_branchover->target = target; + FreeTemp(check_class); + if (IsTemp(result_reg)) { + OpRegCopy(rl_result.low_reg, result_reg); + FreeTemp(result_reg); + } + StoreValue(rl_dest, rl_result); +} + } // namespace art diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc new file mode 100644 index 0000000000..edccec55ba --- /dev/null +++ b/compiler/dex/verification_results.cc @@ -0,0 +1,110 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "verification_results.h" + +#include "base/stl_util.h" +#include "base/mutex.h" +#include "base/mutex-inl.h" +#include "thread.h" +#include "thread-inl.h" +#include "verified_method.h" +#include "verifier/method_verifier.h" +#include "verifier/method_verifier-inl.h" + +namespace art { + +VerificationResults::VerificationResults() + : verified_methods_lock_("compiler verified methods lock"), + verified_methods_(), + rejected_classes_lock_("compiler rejected classes lock"), + rejected_classes_() { +} + +VerificationResults::~VerificationResults() { + Thread* self = Thread::Current(); + { + WriterMutexLock mu(self, verified_methods_lock_); + STLDeleteValues(&verified_methods_); + } +} + +bool VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) { + MethodReference ref = method_verifier->GetMethodReference(); + bool compile = IsCandidateForCompilation(ref, method_verifier->GetAccessFlags()); + // TODO: Check also for virtual/interface invokes when DEX-to-DEX supports devirtualization. + if (!compile && !method_verifier->HasCheckCasts()) { + return true; + } + + const VerifiedMethod* verified_method = VerifiedMethod::Create(method_verifier, compile); + if (verified_method == nullptr) { + DCHECK(method_verifier->HasFailures()); + return false; + } + + WriterMutexLock mu(Thread::Current(), verified_methods_lock_); + auto it = verified_methods_.find(ref); + if (it != verified_methods_.end()) { + // TODO: Investigate why are we doing the work again for this method and try to avoid it. 
+ LOG(WARNING) << "Method processed more than once: " + << PrettyMethod(ref.dex_method_index, *ref.dex_file); + DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size()); + DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size()); + DCHECK_EQ(it->second->GetDexGcMap().size(), verified_method->GetDexGcMap().size()); + delete it->second; + verified_methods_.erase(it); + } + verified_methods_.Put(ref, verified_method); + DCHECK(verified_methods_.find(ref) != verified_methods_.end()); + return true; +} + +const VerifiedMethod* VerificationResults::GetVerifiedMethod(MethodReference ref) { + ReaderMutexLock mu(Thread::Current(), verified_methods_lock_); + auto it = verified_methods_.find(ref); + return (it != verified_methods_.end()) ? it->second : nullptr; +} + +void VerificationResults::AddRejectedClass(ClassReference ref) { + { + WriterMutexLock mu(Thread::Current(), rejected_classes_lock_); + rejected_classes_.insert(ref); + } + DCHECK(IsClassRejected(ref)); +} + +bool VerificationResults::IsClassRejected(ClassReference ref) { + ReaderMutexLock mu(Thread::Current(), rejected_classes_lock_); + return (rejected_classes_.find(ref) != rejected_classes_.end()); +} + +bool VerificationResults::IsCandidateForCompilation(MethodReference& method_ref, + const uint32_t access_flags) { +#ifdef ART_SEA_IR_MODE + bool use_sea = Runtime::Current()->IsSeaIRMode(); + use_sea = use_sea && (std::string::npos != PrettyMethod( + method_ref.dex_method_index, *(method_ref.dex_file)).find("fibonacci")); + if (use_sea) return true; +#endif + // Don't compile class initializers, ever. 
+ if (((access_flags & kAccConstructor) != 0) && ((access_flags & kAccStatic) != 0)) { + return false; + } + return (Runtime::Current()->GetCompilerFilter() != Runtime::kInterpretOnly); +} + +} // namespace art diff --git a/compiler/dex/verification_results.h b/compiler/dex/verification_results.h new file mode 100644 index 0000000000..2eb07131ce --- /dev/null +++ b/compiler/dex/verification_results.h @@ -0,0 +1,70 @@ +/* + * Copyright (C) 2013 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_DEX_VERIFICATION_RESULTS_H_ +#define ART_COMPILER_DEX_VERIFICATION_RESULTS_H_ + +#include <stdint.h> +#include <set> +#include <vector> + +#include "base/macros.h" +#include "base/mutex.h" +#include "class_reference.h" +#include "method_reference.h" +#include "safe_map.h" + +namespace art { + +namespace verifier { +class MethodVerifier; +} // namespace verifier + +class VerifiedMethod; + +class VerificationResults { + public: + VerificationResults(); + ~VerificationResults(); + + bool ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) + LOCKS_EXCLUDED(verified_methods_lock_); + + const VerifiedMethod* GetVerifiedMethod(MethodReference ref) + LOCKS_EXCLUDED(verified_methods_lock_); + + void AddRejectedClass(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_); + bool IsClassRejected(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_); + + static bool IsCandidateForCompilation(MethodReference& method_ref, + const uint32_t access_flags); + + private: + // Verified methods. + typedef SafeMap<MethodReference, const VerifiedMethod*, + MethodReferenceComparator> VerifiedMethodMap; + ReaderWriterMutex verified_methods_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + VerifiedMethodMap verified_methods_; + + // Rejected classes. 
+ ReaderWriterMutex rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; + std::set<ClassReference> rejected_classes_ GUARDED_BY(rejected_classes_lock_); +}; + +} // namespace art + +#endif // ART_COMPILER_DEX_VERIFICATION_RESULTS_H_ diff --git a/compiler/dex/verified_methods_data.cc b/compiler/dex/verified_method.cc index e6c4ddab06..0f812a49cd 100644 --- a/compiler/dex/verified_methods_data.cc +++ b/compiler/dex/verified_method.cc @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013 The Android Open Source Project + * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,6 +14,12 @@ * limitations under the License. */ +#include "verified_method.h" + +#include <algorithm> +#include <vector> + +#include "base/logging.h" #include "base/stl_util.h" #include "dex_file.h" #include "dex_instruction.h" @@ -28,7 +34,7 @@ #include "mirror/dex_cache-inl.h" #include "mirror/object.h" #include "mirror/object-inl.h" -#include "verified_methods_data.h" +#include "UniquePtr.h" #include "verifier/dex_gc_map.h" #include "verifier/method_verifier.h" #include "verifier/method_verifier-inl.h" @@ -37,150 +43,58 @@ namespace art { -VerifiedMethodsData::VerifiedMethodsData() - : dex_gc_maps_lock_("compiler GC maps lock"), - dex_gc_maps_(), - safecast_map_lock_("compiler Cast Elision lock"), - safecast_map_(), - devirt_maps_lock_("compiler Devirtualization lock"), - devirt_maps_(), - rejected_classes_lock_("compiler rejected classes lock"), - rejected_classes_() { -} - -VerifiedMethodsData::~VerifiedMethodsData() { - Thread* self = Thread::Current(); - { - WriterMutexLock mu(self, dex_gc_maps_lock_); - STLDeleteValues(&dex_gc_maps_); - } - { - WriterMutexLock mu(self, safecast_map_lock_); - STLDeleteValues(&safecast_map_); - } - { - WriterMutexLock mu(self, devirt_maps_lock_); - STLDeleteValues(&devirt_maps_); - } -} - -bool 
VerifiedMethodsData::ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) { - MethodReference ref = method_verifier->GetMethodReference(); - bool compile = IsCandidateForCompilation(ref, method_verifier->GetAccessFlags()); +const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_verifier, + bool compile) { + UniquePtr<VerifiedMethod> verified_method(new VerifiedMethod); if (compile) { - /* Generate a register map and add it to the method. */ - const std::vector<uint8_t>* dex_gc_map = GenerateGcMap(method_verifier); - if (dex_gc_map == NULL) { - DCHECK(method_verifier->HasFailures()); - return false; // Not a real failure, but a failure to encode + /* Generate a register map. */ + if (!verified_method->GenerateGcMap(method_verifier)) { + CHECK(method_verifier->HasFailures()); + return nullptr; // Not a real failure, but a failure to encode. } if (kIsDebugBuild) { - VerifyGcMap(method_verifier, *dex_gc_map); + VerifyGcMap(method_verifier, verified_method->dex_gc_map_); } - SetDexGcMap(ref, dex_gc_map); // TODO: move this out when DEX-to-DEX supports devirtualization. 
if (method_verifier->HasVirtualOrInterfaceInvokes()) { - PcToConcreteMethodMap* pc_to_concrete_method = GenerateDevirtMap(method_verifier); - if (pc_to_concrete_method != NULL) { - SetDevirtMap(ref, pc_to_concrete_method); - } + verified_method->GenerateDevirtMap(method_verifier); } } if (method_verifier->HasCheckCasts()) { - MethodSafeCastSet* method_to_safe_casts = GenerateSafeCastSet(method_verifier); - if (method_to_safe_casts != NULL) { - SetSafeCastMap(ref, method_to_safe_casts); - } - } - return true; -} - -const std::vector<uint8_t>* VerifiedMethodsData::GetDexGcMap(MethodReference ref) { - ReaderMutexLock mu(Thread::Current(), dex_gc_maps_lock_); - DexGcMapTable::const_iterator it = dex_gc_maps_.find(ref); - CHECK(it != dex_gc_maps_.end()) - << "Didn't find GC map for: " << PrettyMethod(ref.dex_method_index, *ref.dex_file); - CHECK(it->second != NULL); - return it->second; -} - -const MethodReference* VerifiedMethodsData::GetDevirtMap(const MethodReference& ref, - uint32_t dex_pc) { - ReaderMutexLock mu(Thread::Current(), devirt_maps_lock_); - DevirtualizationMapTable::const_iterator it = devirt_maps_.find(ref); - if (it == devirt_maps_.end()) { - return NULL; - } - - // Look up the PC in the map, get the concrete method to execute and return its reference. - PcToConcreteMethodMap::const_iterator pc_to_concrete_method = it->second->find(dex_pc); - if (pc_to_concrete_method != it->second->end()) { - return &(pc_to_concrete_method->second); - } else { - return NULL; - } -} - -bool VerifiedMethodsData::IsSafeCast(MethodReference ref, uint32_t pc) { - ReaderMutexLock mu(Thread::Current(), safecast_map_lock_); - SafeCastMap::const_iterator it = safecast_map_.find(ref); - if (it == safecast_map_.end()) { - return false; + verified_method->GenerateSafeCastSet(method_verifier); } - - // Look up the cast address in the set of safe casts - // Use binary_search for lookup in the sorted vector. 
- return std::binary_search(it->second->begin(), it->second->end(), pc); -} - -void VerifiedMethodsData::AddRejectedClass(ClassReference ref) { - { - WriterMutexLock mu(Thread::Current(), rejected_classes_lock_); - rejected_classes_.insert(ref); - } - DCHECK(IsClassRejected(ref)); + return verified_method.release(); } -bool VerifiedMethodsData::IsClassRejected(ClassReference ref) { - ReaderMutexLock mu(Thread::Current(), rejected_classes_lock_); - return (rejected_classes_.find(ref) != rejected_classes_.end()); +const MethodReference* VerifiedMethod::GetDevirtTarget(uint32_t dex_pc) const { + auto it = devirt_map_.find(dex_pc); + return (it != devirt_map_.end()) ? &it->second : nullptr; } -bool VerifiedMethodsData::IsCandidateForCompilation(MethodReference& method_ref, - const uint32_t access_flags) { -#ifdef ART_SEA_IR_MODE - bool use_sea = Runtime::Current()->IsSeaIRMode(); - use_sea = use_sea && (std::string::npos != PrettyMethod( - method_ref.dex_method_index, *(method_ref.dex_file)).find("fibonacci")); - if (use_sea) return true; -#endif - // Don't compile class initializers, ever. - if (((access_flags & kAccConstructor) != 0) && ((access_flags & kAccStatic) != 0)) { - return false; - } - return (Runtime::Current()->GetCompilerFilter() != Runtime::kInterpretOnly); +bool VerifiedMethod::IsSafeCast(uint32_t pc) const { + return std::binary_search(safe_cast_set_.begin(), safe_cast_set_.end(), pc); } -const std::vector<uint8_t>* VerifiedMethodsData::GenerateGcMap( - verifier::MethodVerifier* method_verifier) { +bool VerifiedMethod::GenerateGcMap(verifier::MethodVerifier* method_verifier) { + DCHECK(dex_gc_map_.empty()); size_t num_entries, ref_bitmap_bits, pc_bits; ComputeGcMapSizes(method_verifier, &num_entries, &ref_bitmap_bits, &pc_bits); - // There's a single byte to encode the size of each bitmap + // There's a single byte to encode the size of each bitmap. 
if (ref_bitmap_bits >= (8 /* bits per byte */ * 8192 /* 13-bit size */ )) { // TODO: either a better GC map format or per method failures method_verifier->Fail(verifier::VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot encode GC map for method with " << ref_bitmap_bits << " registers"; - return NULL; + return false; } size_t ref_bitmap_bytes = (ref_bitmap_bits + 7) / 8; - // There are 2 bytes to encode the number of entries + // There are 2 bytes to encode the number of entries. if (num_entries >= 65536) { - // TODO: either a better GC map format or per method failures + // TODO: Either a better GC map format or per method failures. method_verifier->Fail(verifier::VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot encode GC map for method with " << num_entries << " entries"; - return NULL; + return false; } size_t pc_bytes; verifier::RegisterMapFormat format; @@ -191,45 +105,39 @@ const std::vector<uint8_t>* VerifiedMethodsData::GenerateGcMap( format = verifier::kRegMapFormatCompact16; pc_bytes = 2; } else { - // TODO: either a better GC map format or per method failures + // TODO: Either a better GC map format or per method failures. method_verifier->Fail(verifier::VERIFY_ERROR_BAD_CLASS_HARD) << "Cannot encode GC map for method with " << (1 << pc_bits) << " instructions (number is rounded up to nearest power of 2)"; - return NULL; + return false; } size_t table_size = ((pc_bytes + ref_bitmap_bytes) * num_entries) + 4; - std::vector<uint8_t>* table = new std::vector<uint8_t>; - if (table == NULL) { - method_verifier->Fail(verifier::VERIFY_ERROR_BAD_CLASS_HARD) - << "Failed to encode GC map (size=" << table_size << ")"; - return NULL; - } - table->reserve(table_size); - // Write table header - table->push_back(format | ((ref_bitmap_bytes & ~0xFF) >> 5)); - table->push_back(ref_bitmap_bytes & 0xFF); - table->push_back(num_entries & 0xFF); - table->push_back((num_entries >> 8) & 0xFF); - // Write table data + dex_gc_map_.reserve(table_size); + // Write table header. 
+ dex_gc_map_.push_back(format | ((ref_bitmap_bytes & ~0xFF) >> 5)); + dex_gc_map_.push_back(ref_bitmap_bytes & 0xFF); + dex_gc_map_.push_back(num_entries & 0xFF); + dex_gc_map_.push_back((num_entries >> 8) & 0xFF); + // Write table data. const DexFile::CodeItem* code_item = method_verifier->CodeItem(); for (size_t i = 0; i < code_item->insns_size_in_code_units_; i++) { if (method_verifier->GetInstructionFlags(i).IsCompileTimeInfoPoint()) { - table->push_back(i & 0xFF); + dex_gc_map_.push_back(i & 0xFF); if (pc_bytes == 2) { - table->push_back((i >> 8) & 0xFF); + dex_gc_map_.push_back((i >> 8) & 0xFF); } verifier::RegisterLine* line = method_verifier->GetRegLine(i); - line->WriteReferenceBitMap(*table, ref_bitmap_bytes); + line->WriteReferenceBitMap(dex_gc_map_, ref_bitmap_bytes); } } - DCHECK_EQ(table->size(), table_size); - return table; + DCHECK_EQ(dex_gc_map_.size(), table_size); + return true; } -void VerifiedMethodsData::VerifyGcMap(verifier::MethodVerifier* method_verifier, - const std::vector<uint8_t>& data) { +void VerifiedMethod::VerifyGcMap(verifier::MethodVerifier* method_verifier, + const std::vector<uint8_t>& data) { // Check that for every GC point there is a map entry, there aren't entries for non-GC points, - // that the table data is well formed and all references are marked (or not) in the bitmap + // that the table data is well formed and all references are marked (or not) in the bitmap. 
verifier::DexPcToReferenceMap map(&data[0]); DCHECK_EQ(data.size(), map.RawSize()); size_t map_index = 0; @@ -237,30 +145,30 @@ void VerifiedMethodsData::VerifyGcMap(verifier::MethodVerifier* method_verifier, for (size_t i = 0; i < code_item->insns_size_in_code_units_; i++) { const uint8_t* reg_bitmap = map.FindBitMap(i, false); if (method_verifier->GetInstructionFlags(i).IsCompileTimeInfoPoint()) { - CHECK_LT(map_index, map.NumEntries()); - CHECK_EQ(map.GetDexPc(map_index), i); - CHECK_EQ(map.GetBitMap(map_index), reg_bitmap); + DCHECK_LT(map_index, map.NumEntries()); + DCHECK_EQ(map.GetDexPc(map_index), i); + DCHECK_EQ(map.GetBitMap(map_index), reg_bitmap); map_index++; verifier::RegisterLine* line = method_verifier->GetRegLine(i); for (size_t j = 0; j < code_item->registers_size_; j++) { if (line->GetRegisterType(j).IsNonZeroReferenceTypes()) { - CHECK_LT(j / 8, map.RegWidth()); - CHECK_EQ((reg_bitmap[j / 8] >> (j % 8)) & 1, 1); + DCHECK_LT(j / 8, map.RegWidth()); + DCHECK_EQ((reg_bitmap[j / 8] >> (j % 8)) & 1, 1); } else if ((j / 8) < map.RegWidth()) { - CHECK_EQ((reg_bitmap[j / 8] >> (j % 8)) & 1, 0); + DCHECK_EQ((reg_bitmap[j / 8] >> (j % 8)) & 1, 0); } else { - // If a register doesn't contain a reference then the bitmap may be shorter than the line + // If a register doesn't contain a reference then the bitmap may be shorter than the line. 
} } } else { - CHECK(reg_bitmap == NULL); + DCHECK(reg_bitmap == NULL); } } } -void VerifiedMethodsData::ComputeGcMapSizes(verifier::MethodVerifier* method_verifier, - size_t* gc_points, size_t* ref_bitmap_bits, - size_t* log2_max_gc_pc) { +void VerifiedMethod::ComputeGcMapSizes(verifier::MethodVerifier* method_verifier, + size_t* gc_points, size_t* ref_bitmap_bits, + size_t* log2_max_gc_pc) { size_t local_gc_points = 0; size_t max_insn = 0; size_t max_ref_reg = -1; @@ -274,7 +182,7 @@ void VerifiedMethodsData::ComputeGcMapSizes(verifier::MethodVerifier* method_ver } } *gc_points = local_gc_points; - *ref_bitmap_bits = max_ref_reg + 1; // if max register is 0 we need 1 bit to encode (ie +1) + *ref_bitmap_bits = max_ref_reg + 1; // If max register is 0 we need 1 bit to encode (ie +1). size_t i = 0; while ((1U << i) <= max_insn) { i++; @@ -282,92 +190,13 @@ void VerifiedMethodsData::ComputeGcMapSizes(verifier::MethodVerifier* method_ver *log2_max_gc_pc = i; } -void VerifiedMethodsData::SetDexGcMap(MethodReference ref, const std::vector<uint8_t>* gc_map) { - DCHECK(Runtime::Current()->IsCompiler()); - { - WriterMutexLock mu(Thread::Current(), dex_gc_maps_lock_); - DexGcMapTable::iterator it = dex_gc_maps_.find(ref); - if (it != dex_gc_maps_.end()) { - delete it->second; - dex_gc_maps_.erase(it); - } - dex_gc_maps_.Put(ref, gc_map); - } - DCHECK(GetDexGcMap(ref) != NULL); -} - -VerifiedMethodsData::MethodSafeCastSet* VerifiedMethodsData::GenerateSafeCastSet( - verifier::MethodVerifier* method_verifier) { - /* - * Walks over the method code and adds any cast instructions in which - * the type cast is implicit to a set, which is used in the code generation - * to elide these casts. 
- */ - if (method_verifier->HasFailures()) { - return NULL; - } - UniquePtr<MethodSafeCastSet> mscs; - const DexFile::CodeItem* code_item = method_verifier->CodeItem(); - const Instruction* inst = Instruction::At(code_item->insns_); - const Instruction* end = Instruction::At(code_item->insns_ + - code_item->insns_size_in_code_units_); - - for (; inst < end; inst = inst->Next()) { - Instruction::Code code = inst->Opcode(); - if ((code == Instruction::CHECK_CAST) || (code == Instruction::APUT_OBJECT)) { - uint32_t dex_pc = inst->GetDexPc(code_item->insns_); - const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc); - bool is_safe_cast = false; - if (code == Instruction::CHECK_CAST) { - const verifier::RegType& reg_type(line->GetRegisterType(inst->VRegA_21c())); - const verifier::RegType& cast_type = - method_verifier->ResolveCheckedClass(inst->VRegB_21c()); - is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type); - } else { - const verifier::RegType& array_type(line->GetRegisterType(inst->VRegB_23x())); - // We only know its safe to assign to an array if the array type is precise. For example, - // an Object[] can have any type of object stored in it, but it may also be assigned a - // String[] in which case the stores need to be of Strings. - if (array_type.IsPreciseReference()) { - const verifier::RegType& value_type(line->GetRegisterType(inst->VRegA_23x())); - const verifier::RegType& component_type = method_verifier->GetRegTypeCache() - ->GetComponentType(array_type, method_verifier->GetClassLoader()); - is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type); - } - } - if (is_safe_cast) { - if (mscs.get() == nullptr) { - mscs.reset(new MethodSafeCastSet()); - } else { - DCHECK_LT(mscs->back(), dex_pc); // Verify ordering for push_back() to the sorted vector. 
- } - mscs->push_back(dex_pc); - } - } - } - return mscs.release(); -} - -void VerifiedMethodsData::SetSafeCastMap(MethodReference ref, const MethodSafeCastSet* cast_set) { - WriterMutexLock mu(Thread::Current(), safecast_map_lock_); - SafeCastMap::iterator it = safecast_map_.find(ref); - if (it != safecast_map_.end()) { - delete it->second; - safecast_map_.erase(it); - } - safecast_map_.Put(ref, cast_set); - DCHECK(safecast_map_.find(ref) != safecast_map_.end()); -} - -VerifiedMethodsData::PcToConcreteMethodMap* VerifiedMethodsData::GenerateDevirtMap( - verifier::MethodVerifier* method_verifier) { +void VerifiedMethod::GenerateDevirtMap(verifier::MethodVerifier* method_verifier) { // It is risky to rely on reg_types for sharpening in cases of soft // verification, we might end up sharpening to a wrong implementation. Just abort. if (method_verifier->HasFailures()) { - return NULL; + return; } - UniquePtr<PcToConcreteMethodMap> pc_to_concrete_method_map; const DexFile::CodeItem* code_item = method_verifier->CodeItem(); const uint16_t* insns = code_item->insns_; const Instruction* inst = Instruction::At(insns); @@ -426,29 +255,58 @@ VerifiedMethodsData::PcToConcreteMethodMap* VerifiedMethodsData::GenerateDevirtM concrete_method->GetDeclaringClass()->IsFinal()) { // If we knew exactly the class being dispatched upon, or if the target method cannot be // overridden record the target to be used in the compiler driver. 
- if (pc_to_concrete_method_map.get() == NULL) { - pc_to_concrete_method_map.reset(new PcToConcreteMethodMap()); - } MethodReference concrete_ref( concrete_method->GetDeclaringClass()->GetDexCache()->GetDexFile(), concrete_method->GetDexMethodIndex()); - pc_to_concrete_method_map->Put(dex_pc, concrete_ref); + devirt_map_.Put(dex_pc, concrete_ref); } } - return pc_to_concrete_method_map.release(); } -void VerifiedMethodsData::SetDevirtMap(MethodReference ref, - const PcToConcreteMethodMap* devirt_map) { - WriterMutexLock mu(Thread::Current(), devirt_maps_lock_); - DevirtualizationMapTable::iterator it = devirt_maps_.find(ref); - if (it != devirt_maps_.end()) { - delete it->second; - devirt_maps_.erase(it); +void VerifiedMethod::GenerateSafeCastSet(verifier::MethodVerifier* method_verifier) { + /* + * Walks over the method code and adds any cast instructions in which + * the type cast is implicit to a set, which is used in the code generation + * to elide these casts. + */ + if (method_verifier->HasFailures()) { + return; } + const DexFile::CodeItem* code_item = method_verifier->CodeItem(); + const Instruction* inst = Instruction::At(code_item->insns_); + const Instruction* end = Instruction::At(code_item->insns_ + + code_item->insns_size_in_code_units_); - devirt_maps_.Put(ref, devirt_map); - DCHECK(devirt_maps_.find(ref) != devirt_maps_.end()); + for (; inst < end; inst = inst->Next()) { + Instruction::Code code = inst->Opcode(); + if ((code == Instruction::CHECK_CAST) || (code == Instruction::APUT_OBJECT)) { + uint32_t dex_pc = inst->GetDexPc(code_item->insns_); + const verifier::RegisterLine* line = method_verifier->GetRegLine(dex_pc); + bool is_safe_cast = false; + if (code == Instruction::CHECK_CAST) { + const verifier::RegType& reg_type(line->GetRegisterType(inst->VRegA_21c())); + const verifier::RegType& cast_type = + method_verifier->ResolveCheckedClass(inst->VRegB_21c()); + is_safe_cast = cast_type.IsStrictlyAssignableFrom(reg_type); + } else { + const 
verifier::RegType& array_type(line->GetRegisterType(inst->VRegB_23x())); + // We only know its safe to assign to an array if the array type is precise. For example, + // an Object[] can have any type of object stored in it, but it may also be assigned a + // String[] in which case the stores need to be of Strings. + if (array_type.IsPreciseReference()) { + const verifier::RegType& value_type(line->GetRegisterType(inst->VRegA_23x())); + const verifier::RegType& component_type = method_verifier->GetRegTypeCache() + ->GetComponentType(array_type, method_verifier->GetClassLoader()); + is_safe_cast = component_type.IsStrictlyAssignableFrom(value_type); + } + } + if (is_safe_cast) { + // Verify ordering for push_back() to the sorted vector. + DCHECK(safe_cast_set_.empty() || safe_cast_set_.back() < dex_pc); + safe_cast_set_.push_back(dex_pc); + } + } + } } } // namespace art diff --git a/compiler/dex/verified_method.h b/compiler/dex/verified_method.h new file mode 100644 index 0000000000..aa0e72a5ca --- /dev/null +++ b/compiler/dex/verified_method.h @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_COMPILER_DEX_VERIFIED_METHOD_H_ +#define ART_COMPILER_DEX_VERIFIED_METHOD_H_ + +#include <vector> + +#include "method_reference.h" +#include "safe_map.h" + +namespace art { + +namespace verifier { +class MethodVerifier; +} // namespace verifier + +class VerifiedMethod { + public: + // Cast elision set type. + // Since we're adding the dex PCs to the set in increasing order, a sorted vector + // is better for performance (not just memory usage), especially for large sets. + typedef std::vector<uint32_t> SafeCastSet; + + // Devirtualization map type maps dex offset to concrete method reference. + typedef SafeMap<uint32_t, MethodReference> DevirtualizationMap; + + static const VerifiedMethod* Create(verifier::MethodVerifier* method_verifier, bool compile) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ~VerifiedMethod() = default; + + const std::vector<uint8_t>& GetDexGcMap() const { + return dex_gc_map_; + } + + const DevirtualizationMap& GetDevirtMap() const { + return devirt_map_; + } + + const SafeCastSet& GetSafeCastSet() const { + return safe_cast_set_; + } + + // Returns the devirtualization target method, or nullptr if none. + const MethodReference* GetDevirtTarget(uint32_t dex_pc) const; + + // Returns true if the cast can statically be verified to be redundant + // by using the check-cast elision peephole optimization in the verifier. + bool IsSafeCast(uint32_t pc) const; + + private: + VerifiedMethod() = default; + + /* + * Generate the GC map for a method that has just been verified (i.e. we're doing this as part of + * verification). For type-precise determination we have all the data we need, so we just need to + * encode it in some clever fashion. + * Stores the data in dex_gc_map_, returns true on success and false on failure. + */ + bool GenerateGcMap(verifier::MethodVerifier* method_verifier); + + // Verify that the GC map associated with method_ is well formed. 
+ static void VerifyGcMap(verifier::MethodVerifier* method_verifier, + const std::vector<uint8_t>& data); + + // Compute sizes for GC map data. + static void ComputeGcMapSizes(verifier::MethodVerifier* method_verifier, + size_t* gc_points, size_t* ref_bitmap_bits, size_t* log2_max_gc_pc); + + // Generate devirtualizaion map into devirt_map_. + void GenerateDevirtMap(verifier::MethodVerifier* method_verifier) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + // Generate safe case set into safe_cast_set_. + void GenerateSafeCastSet(verifier::MethodVerifier* method_verifier) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + std::vector<uint8_t> dex_gc_map_; + DevirtualizationMap devirt_map_; + SafeCastSet safe_cast_set_; +}; + +} // namespace art + +#endif // ART_COMPILER_DEX_VERIFIED_METHOD_H_ diff --git a/compiler/dex/verified_methods_data.h b/compiler/dex/verified_methods_data.h deleted file mode 100644 index d495dff7d9..0000000000 --- a/compiler/dex/verified_methods_data.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright (C) 2013 The Android Open Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#ifndef ART_COMPILER_DEX_VERIFIED_METHODS_DATA_H_ -#define ART_COMPILER_DEX_VERIFIED_METHODS_DATA_H_ - -#include <stdint.h> -#include <set> -#include <vector> - -#include "base/macros.h" -#include "base/mutex.h" -#include "class_reference.h" -#include "method_reference.h" -#include "safe_map.h" - -namespace art { - -namespace verifier { -class MethodVerifier; -} // namespace verifier - -class VerifiedMethodsData { - public: - VerifiedMethodsData(); - ~VerifiedMethodsData(); - - bool ProcessVerifiedMethod(verifier::MethodVerifier* method_verifier) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) - LOCKS_EXCLUDED(dex_gc_maps_lock_, devirt_maps_lock_, safecast_map_lock_); - - const std::vector<uint8_t>* GetDexGcMap(MethodReference ref) - LOCKS_EXCLUDED(dex_gc_maps_lock_); - - const MethodReference* GetDevirtMap(const MethodReference& ref, uint32_t dex_pc) - LOCKS_EXCLUDED(devirt_maps_lock_); - - // Returns true if the cast can statically be verified to be redundant - // by using the check-cast elision peephole optimization in the verifier - bool IsSafeCast(MethodReference ref, uint32_t pc) LOCKS_EXCLUDED(safecast_map_lock_); - - void AddRejectedClass(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_); - bool IsClassRejected(ClassReference ref) LOCKS_EXCLUDED(rejected_classes_lock_); - - static bool IsCandidateForCompilation(MethodReference& method_ref, - const uint32_t access_flags); - - private: - /* - * Generate the GC map for a method that has just been verified (i.e. we're doing this as part of - * verification). For type-precise determination we have all the data we need, so we just need to - * encode it in some clever fashion. - * Returns a pointer to a newly-allocated RegisterMap, or NULL on failure. 
- */ - const std::vector<uint8_t>* GenerateGcMap(verifier::MethodVerifier* method_verifier); - - // Verify that the GC map associated with method_ is well formed - void VerifyGcMap(verifier::MethodVerifier* method_verifier, const std::vector<uint8_t>& data); - - // Compute sizes for GC map data - void ComputeGcMapSizes(verifier::MethodVerifier* method_verifier, - size_t* gc_points, size_t* ref_bitmap_bits, size_t* log2_max_gc_pc); - - // All the GC maps that the verifier has created - typedef SafeMap<const MethodReference, const std::vector<uint8_t>*, - MethodReferenceComparator> DexGcMapTable; - ReaderWriterMutex dex_gc_maps_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; - DexGcMapTable dex_gc_maps_ GUARDED_BY(dex_gc_maps_lock_); - void SetDexGcMap(MethodReference ref, const std::vector<uint8_t>* dex_gc_map) - LOCKS_EXCLUDED(dex_gc_maps_lock_); - - // Cast elision types. - // Since we're adding the dex PCs to the set in increasing order, a sorted vector - // is better for performance (not just memory usage), especially for large sets. - typedef std::vector<uint32_t> MethodSafeCastSet; - typedef SafeMap<MethodReference, const MethodSafeCastSet*, - MethodReferenceComparator> SafeCastMap; - MethodSafeCastSet* GenerateSafeCastSet(verifier::MethodVerifier* method_verifier) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void SetSafeCastMap(MethodReference ref, const MethodSafeCastSet* mscs) - LOCKS_EXCLUDED(safecast_map_lock_); - ReaderWriterMutex safecast_map_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; - SafeCastMap safecast_map_ GUARDED_BY(safecast_map_lock_); - - // Devirtualization map. 
- typedef SafeMap<uint32_t, MethodReference> PcToConcreteMethodMap; - typedef SafeMap<MethodReference, const PcToConcreteMethodMap*, - MethodReferenceComparator> DevirtualizationMapTable; - PcToConcreteMethodMap* GenerateDevirtMap(verifier::MethodVerifier* method_verifier) - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - ReaderWriterMutex devirt_maps_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; - DevirtualizationMapTable devirt_maps_ GUARDED_BY(devirt_maps_lock_); - void SetDevirtMap(MethodReference ref, const PcToConcreteMethodMap* pc_method_map) - LOCKS_EXCLUDED(devirt_maps_lock_); - - // Rejected classes - typedef std::set<ClassReference> RejectedClassesTable; - ReaderWriterMutex rejected_classes_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; - RejectedClassesTable rejected_classes_ GUARDED_BY(rejected_classes_lock_); -}; - -} // namespace art - -#endif // ART_COMPILER_DEX_VERIFIED_METHODS_DATA_H_ diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index f390b4143f..d504a4e704 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -27,7 +27,8 @@ #include "class_linker.h" #include "dex_compilation_unit.h" #include "dex_file-inl.h" -#include "dex/verified_methods_data.h" +#include "dex/verification_results.h" +#include "dex/verified_method.h" #include "jni_internal.h" #include "object_utils.h" #include "runtime.h" @@ -336,13 +337,13 @@ extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver& co extern "C" void compilerLLVMSetBitcodeFileName(art::CompilerDriver& driver, std::string const& filename); -CompilerDriver::CompilerDriver(VerifiedMethodsData* verified_methods_data, +CompilerDriver::CompilerDriver(VerificationResults* verification_results, DexFileToMethodInlinerMap* method_inliner_map, CompilerBackend compiler_backend, InstructionSet instruction_set, InstructionSetFeatures instruction_set_features, bool image, DescriptorSet* image_classes, size_t thread_count, bool dump_stats) - : 
verified_methods_data_(verified_methods_data), + : verification_results_(verification_results), method_inliner_map_(method_inliner_map), compiler_backend_(compiler_backend), instruction_set_(instruction_set), @@ -1272,9 +1273,9 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui if (enableVerifierBasedSharpening && (*invoke_type == kVirtual || *invoke_type == kInterface)) { // Did the verifier record a more precise invoke target based on its type information? - const MethodReference caller_method(mUnit->GetDexFile(), mUnit->GetDexMethodIndex()); + DCHECK(mUnit->GetVerifiedMethod() != nullptr); const MethodReference* devirt_map_target = - verified_methods_data_->GetDevirtMap(caller_method, dex_pc); + mUnit->GetVerifiedMethod()->GetDevirtTarget(dex_pc); if (devirt_map_target != NULL) { SirtRef<mirror::DexCache> target_dex_cache(soa.Self(), mUnit->GetClassLinker()->FindDexCache(*devirt_map_target->dex_file)); SirtRef<mirror::ClassLoader> class_loader(soa.Self(), soa.Decode<mirror::ClassLoader*>(mUnit->GetClassLoader())); @@ -1321,8 +1322,15 @@ bool CompilerDriver::ComputeInvokeInfo(const DexCompilationUnit* mUnit, const ui return false; // Incomplete knowledge needs slow path. 
} -bool CompilerDriver::IsSafeCast(const MethodReference& mr, uint32_t dex_pc) { - bool result = verified_methods_data_->IsSafeCast(mr, dex_pc); +const VerifiedMethod* CompilerDriver::GetVerifiedMethod(const DexFile* dex_file, + uint32_t method_idx) const { + MethodReference ref(dex_file, method_idx); + return verification_results_->GetVerifiedMethod(ref); +} + +bool CompilerDriver::IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc) { + DCHECK(mUnit->GetVerifiedMethod() != nullptr); + bool result = mUnit->GetVerifiedMethod()->IsSafeCast(dex_pc); if (result) { stats_->SafeCast(); } else { @@ -2268,7 +2276,7 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz } ClassReference ref(&dex_file, class_def_index); // Skip compiling classes with generic verifier failures since they will still fail at runtime - if (manager->GetCompiler()->verified_methods_data_->IsClassRejected(ref)) { + if (manager->GetCompiler()->verification_results_->IsClassRejected(ref)) { return; } const byte* class_data = dex_file.GetClassData(class_def); @@ -2351,7 +2359,7 @@ void CompilerDriver::CompileMethod(const DexFile::CodeItem* code_item, uint32_t } else if ((access_flags & kAccAbstract) != 0) { } else { MethodReference method_ref(&dex_file, method_idx); - bool compile = VerifiedMethodsData::IsCandidateForCompilation(method_ref, access_flags); + bool compile = VerificationResults::IsCandidateForCompilation(method_ref, access_flags); if (compile) { CompilerFn compiler = compiler_; diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index eef94a1fc1..a8110e71d7 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -44,7 +44,8 @@ class DexCompilationUnit; class DexFileToMethodInlinerMap; class OatWriter; class TimingLogger; -class VerifiedMethodsData; +class VerificationResults; +class VerifiedMethod; enum CompilerBackend { kQuick, @@ -92,7 +93,7 @@ class CompilerDriver { // enabled. 
"image_classes" lets the compiler know what classes it // can assume will be in the image, with NULL implying all available // classes. - explicit CompilerDriver(VerifiedMethodsData* verified_methods_data, + explicit CompilerDriver(VerificationResults* verification_results, DexFileToMethodInlinerMap* method_inliner_map, CompilerBackend compiler_backend, InstructionSet instruction_set, InstructionSetFeatures instruction_set_features, @@ -109,8 +110,8 @@ class CompilerDriver { void CompileOne(const mirror::ArtMethod* method, TimingLogger& timings) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - VerifiedMethodsData* GetVerifiedMethodsData() const { - return verified_methods_data_; + VerificationResults* GetVerificationResults() const { + return verification_results_; } DexFileToMethodInlinerMap* GetMethodInlinerMap() const { @@ -213,7 +214,8 @@ class CompilerDriver { uintptr_t* direct_code, uintptr_t* direct_method) LOCKS_EXCLUDED(Locks::mutator_lock_); - bool IsSafeCast(const MethodReference& mr, uint32_t dex_pc); + const VerifiedMethod* GetVerifiedMethod(const DexFile* dex_file, uint32_t method_idx) const; + bool IsSafeCast(const DexCompilationUnit* mUnit, uint32_t dex_pc); // Record patch information for later fix up. 
void AddCodePatch(const DexFile* dex_file, @@ -486,7 +488,7 @@ class CompilerDriver { std::vector<const CallPatchInformation*> methods_to_patch_; std::vector<const TypePatchInformation*> classes_to_patch_; - VerifiedMethodsData* verified_methods_data_; + VerificationResults* verification_results_; DexFileToMethodInlinerMap* method_inliner_map_; CompilerBackend compiler_backend_; diff --git a/compiler/driver/dex_compilation_unit.cc b/compiler/driver/dex_compilation_unit.cc index c441d09ab2..840b0adf49 100644 --- a/compiler/driver/dex_compilation_unit.cc +++ b/compiler/driver/dex_compilation_unit.cc @@ -31,7 +31,8 @@ DexCompilationUnit::DexCompilationUnit(CompilationUnit* cu) code_item_(cu->code_item), class_def_idx_(cu->class_def_idx), dex_method_idx_(cu->method_idx), - access_flags_(cu->access_flags) { + access_flags_(cu->access_flags), + verified_method_(cu_->compiler_driver->GetVerifiedMethod(cu->dex_file, cu->method_idx)) { } DexCompilationUnit::DexCompilationUnit(CompilationUnit* cu, @@ -41,7 +42,8 @@ DexCompilationUnit::DexCompilationUnit(CompilationUnit* cu, const DexFile::CodeItem* code_item, uint16_t class_def_idx, uint32_t method_idx, - uint32_t access_flags) + uint32_t access_flags, + const VerifiedMethod* verified_method) : cu_(cu), class_loader_(class_loader), class_linker_(class_linker), @@ -49,7 +51,8 @@ DexCompilationUnit::DexCompilationUnit(CompilationUnit* cu, code_item_(code_item), class_def_idx_(class_def_idx), dex_method_idx_(method_idx), - access_flags_(access_flags) { + access_flags_(access_flags), + verified_method_(verified_method) { } const std::string& DexCompilationUnit::GetSymbol() { diff --git a/compiler/driver/dex_compilation_unit.h b/compiler/driver/dex_compilation_unit.h index 3df50ffec6..84f57991c3 100644 --- a/compiler/driver/dex_compilation_unit.h +++ b/compiler/driver/dex_compilation_unit.h @@ -29,6 +29,7 @@ class DexCache; } // namespace mirror class ClassLinker; struct CompilationUnit; +class VerifiedMethod; class 
DexCompilationUnit { public: @@ -36,7 +37,8 @@ class DexCompilationUnit { DexCompilationUnit(CompilationUnit* cu, jobject class_loader, ClassLinker* class_linker, const DexFile& dex_file, const DexFile::CodeItem* code_item, - uint16_t class_def_idx, uint32_t method_idx, uint32_t access_flags); + uint16_t class_def_idx, uint32_t method_idx, uint32_t access_flags, + const VerifiedMethod* verified_method); CompilationUnit* GetCompilationUnit() const { return cu_; @@ -96,6 +98,10 @@ class DexCompilationUnit { return ((access_flags_ & kAccSynchronized) != 0); } + const VerifiedMethod* GetVerifiedMethod() const { + return verified_method_; + } + const std::string& GetSymbol(); private: @@ -111,6 +117,7 @@ class DexCompilationUnit { const uint16_t class_def_idx_; const uint32_t dex_method_idx_; const uint32_t access_flags_; + const VerifiedMethod* const verified_method_; std::string symbol_; }; diff --git a/compiler/llvm/compiler_llvm.cc b/compiler/llvm/compiler_llvm.cc index 35d1ecd783..94408bb39c 100644 --- a/compiler/llvm/compiler_llvm.cc +++ b/compiler/llvm/compiler_llvm.cc @@ -20,7 +20,8 @@ #include "base/stl_util.h" #include "class_linker.h" #include "compiled_method.h" -#include "dex/verified_methods_data.h" +#include "dex/verification_results.h" +#include "dex/verified_method.h" #include "driver/compiler_driver.h" #include "driver/dex_compilation_unit.h" #include "globals.h" @@ -153,11 +154,9 @@ CompileDexMethod(DexCompilationUnit* dex_compilation_unit, InvokeType invoke_typ cunit->Materialize(); - MethodReference mref(dex_compilation_unit->GetDexFile(), - dex_compilation_unit->GetDexMethodIndex()); return new CompiledMethod(*compiler_driver_, compiler_driver_->GetInstructionSet(), cunit->GetElfObject(), - *compiler_driver_->GetVerifiedMethodsData()->GetDexGcMap(mref), + dex_compilation_unit->GetVerifiedMethod()->GetDexGcMap(), cunit->GetDexCompilationUnit()->GetSymbol()); } @@ -214,7 +213,7 @@ extern "C" art::CompiledMethod* ArtCompileMethod(art::CompilerDriver& 
driver, art::DexCompilationUnit dex_compilation_unit( NULL, class_loader, class_linker, dex_file, code_item, - class_def_idx, method_idx, access_flags); + class_def_idx, method_idx, access_flags, driver.GetVerifiedMethod(&dex_file, method_idx)); art::llvm::CompilerLLVM* compiler_llvm = ContextOf(driver); art::CompiledMethod* result = compiler_llvm->CompileDexMethod(&dex_compilation_unit, invoke_type); return result; @@ -226,8 +225,8 @@ extern "C" art::CompiledMethod* ArtLLVMJniCompileMethod(art::CompilerDriver& dri art::ClassLinker *class_linker = art::Runtime::Current()->GetClassLinker(); art::DexCompilationUnit dex_compilation_unit( - NULL, NULL, class_linker, dex_file, NULL, - 0, method_idx, access_flags); + nullptr, nullptr, class_linker, dex_file, nullptr, + 0, method_idx, access_flags, nullptr); art::llvm::CompilerLLVM* compiler_llvm = ContextOf(driver); art::CompiledMethod* result = compiler_llvm->CompileNativeMethod(&dex_compilation_unit); diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index 2434262279..fc454127c3 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -79,10 +79,10 @@ TEST_F(OatTest, WriteRead) { InstructionSet insn_set = kIsTargetBuild ? kThumb2 : kX86; InstructionSetFeatures insn_features; - verified_methods_data_.reset(new VerifiedMethodsData); + verification_results_.reset(new VerificationResults); method_inliner_map_.reset(compiler_backend == kQuick ? 
new DexFileToMethodInlinerMap : nullptr); - callbacks_.Reset(verified_methods_data_.get(), method_inliner_map_.get()); - compiler_driver_.reset(new CompilerDriver(verified_methods_data_.get(), + callbacks_.Reset(verification_results_.get(), method_inliner_map_.get()); + compiler_driver_.reset(new CompilerDriver(verification_results_.get(), method_inliner_map_.get(), compiler_backend, insn_set, insn_features, false, NULL, 2, true)); diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index 199a2b8d58..7a902d86d6 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -23,7 +23,7 @@ #include "base/unix_file/fd_file.h" #include "class_linker.h" #include "dex_file-inl.h" -#include "dex/verified_methods_data.h" +#include "dex/verification_results.h" #include "gc/space/space.h" #include "mirror/art_method-inl.h" #include "mirror/array.h" @@ -218,7 +218,7 @@ size_t OatWriter::InitOatClasses(size_t offset) { mirror::Class::Status status; if (compiled_class != NULL) { status = compiled_class->GetStatus(); - } else if (compiler_driver_->GetVerifiedMethodsData()->IsClassRejected(class_ref)) { + } else if (compiler_driver_->GetVerificationResults()->IsClassRejected(class_ref)) { status = mirror::Class::kStatusError; } else { status = mirror::Class::kStatusNotReady; @@ -433,7 +433,7 @@ size_t OatWriter::InitOatCodeMethod(size_t offset, size_t oat_class_index, mirror::Class::Status status; if (compiled_class != NULL) { status = compiled_class->GetStatus(); - } else if (compiler_driver_->GetVerifiedMethodsData()->IsClassRejected(class_ref)) { + } else if (compiler_driver_->GetVerificationResults()->IsClassRejected(class_ref)) { status = mirror::Class::kStatusError; } else { status = mirror::Class::kStatusNotReady; diff --git a/dex2oat/Android.mk b/dex2oat/Android.mk index 05dcd7bac6..6cd0538c43 100644 --- a/dex2oat/Android.mk +++ b/dex2oat/Android.mk @@ -33,7 +33,7 @@ ifeq ($(WITH_HOST_DALVIK),true) ifeq ($(ART_BUILD_NDEBUG),true) $(eval $(call 
build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart-compiler,art/compiler,host,ndebug)) endif - ifeq ($(ART_BUILD_NDEBUG),true) + ifeq ($(ART_BUILD_DEBUG),true) $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd-compiler,art/compiler,host,debug)) endif endif diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 26fac2370d..97df199be9 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -32,7 +32,7 @@ #include "class_linker.h" #include "compiler_callbacks.h" #include "dex_file-inl.h" -#include "dex/verified_methods_data.h" +#include "dex/verification_results.h" #include "driver/compiler_driver.h" #include "elf_fixup.h" #include "elf_stripper.h" @@ -268,7 +268,7 @@ class Dex2Oat { Runtime::Current()->SetCompileTimeClassPath(class_loader, class_path_files); } - UniquePtr<CompilerDriver> driver(new CompilerDriver(verified_methods_data_.get(), + UniquePtr<CompilerDriver> driver(new CompilerDriver(verification_results_.get(), method_inliner_map_.get(), compiler_backend_, instruction_set_, @@ -348,15 +348,15 @@ class Dex2Oat { private: class Dex2OatCompilerCallbacks : public CompilerCallbacks { public: - Dex2OatCompilerCallbacks(VerifiedMethodsData* verified_methods_data, + Dex2OatCompilerCallbacks(VerificationResults* verification_results, DexFileToMethodInlinerMap* method_inliner_map) - : verified_methods_data_(verified_methods_data), + : verification_results_(verification_results), method_inliner_map_(method_inliner_map) { } virtual ~Dex2OatCompilerCallbacks() { } virtual bool MethodVerified(verifier::MethodVerifier* verifier) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - bool result = verified_methods_data_->ProcessVerifiedMethod(verifier); + bool result = verification_results_->ProcessVerifiedMethod(verifier); if (result && method_inliner_map_ != nullptr) { MethodReference ref = verifier->GetMethodReference(); method_inliner_map_->GetMethodInliner(ref.dex_file) @@ -365,11 +365,11 @@ class Dex2Oat { return result; } virtual void 
ClassRejected(ClassReference ref) { - verified_methods_data_->AddRejectedClass(ref); + verification_results_->AddRejectedClass(ref); } private: - VerifiedMethodsData* verified_methods_data_; + VerificationResults* verification_results_; DexFileToMethodInlinerMap* method_inliner_map_; }; @@ -380,9 +380,9 @@ class Dex2Oat { : compiler_backend_(compiler_backend), instruction_set_(instruction_set), instruction_set_features_(instruction_set_features), - verified_methods_data_(new VerifiedMethodsData), + verification_results_(new VerificationResults), method_inliner_map_(compiler_backend == kQuick ? new DexFileToMethodInlinerMap : nullptr), - callbacks_(verified_methods_data_.get(), method_inliner_map_.get()), + callbacks_(verification_results_.get(), method_inliner_map_.get()), runtime_(nullptr), thread_count_(thread_count), start_ns_(NanoTime()) { @@ -446,7 +446,7 @@ class Dex2Oat { const InstructionSet instruction_set_; const InstructionSetFeatures instruction_set_features_; - UniquePtr<VerifiedMethodsData> verified_methods_data_; + UniquePtr<VerificationResults> verification_results_; UniquePtr<DexFileToMethodInlinerMap> method_inliner_map_; Dex2OatCompilerCallbacks callbacks_; Runtime* runtime_; diff --git a/jdwpspy/Android.mk b/jdwpspy/Android.mk deleted file mode 100644 index 97162f0b0d..0000000000 --- a/jdwpspy/Android.mk +++ /dev/null @@ -1,25 +0,0 @@ -# -# Copyright (C) 2006 The Android Open Source Project -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -LOCAL_PATH:= $(call my-dir) - -include $(CLEAR_VARS) -LOCAL_SRC_FILES:= Main.cpp Net.cpp -LOCAL_C_INCLUDES += art/runtime -LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk -LOCAL_MODULE := jdwpspy -include $(BUILD_HOST_EXECUTABLE) -ART_HOST_EXECUTABLES += $(HOST_OUT_EXECUTABLES)/$(LOCAL_MODULE) diff --git a/jdwpspy/Common.h b/jdwpspy/Common.h deleted file mode 100644 index 30a49fba76..0000000000 --- a/jdwpspy/Common.h +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Copyright 2006 The Android Open Source Project - * - * jdwpspy common stuff. - */ -#ifndef ART_JDWPSPY_COMMON_H_ -#define ART_JDWPSPY_COMMON_H_ - -#include <stdint.h> -#include <stdio.h> -#include <sys/types.h> - -typedef uint8_t u1; -typedef uint16_t u2; -typedef uint32_t u4; -typedef uint64_t u8; - -#define NELEM(x) (sizeof(x) / sizeof((x)[0])) - -#ifndef _JDWP_MISC_INLINE -# define INLINE extern inline -#else -# define INLINE -#endif - -/* - * Get 1 byte. (Included to make the code more legible.) - */ -INLINE u1 get1(unsigned const char* pSrc) { - return *pSrc; -} - -/* - * Get 2 big-endian bytes. - */ -INLINE u2 get2BE(unsigned char const* pSrc) { - u2 result; - - result = *pSrc++ << 8; - result |= *pSrc++; - - return result; -} - -/* - * Get 4 big-endian bytes. - */ -INLINE u4 get4BE(unsigned char const* pSrc) { - u4 result; - - result = *pSrc++ << 24; - result |= *pSrc++ << 16; - result |= *pSrc++ << 8; - result |= *pSrc++; - - return result; -} - -/* - * Get 8 big-endian bytes. - */ -INLINE u8 get8BE(unsigned char const* pSrc) { - u8 result; - - result = (u8) *pSrc++ << 56; - result |= (u8) *pSrc++ << 48; - result |= (u8) *pSrc++ << 40; - result |= (u8) *pSrc++ << 32; - result |= (u8) *pSrc++ << 24; - result |= (u8) *pSrc++ << 16; - result |= (u8) *pSrc++ << 8; - result |= (u8) *pSrc++; - - return result; -} - - -/* - * Start here. - */ -int run(const char* connectHost, int connectPort, int listenPort); - -/* - * Print a hex dump to the specified file pointer. 
- * - * "local" mode prints a hex dump starting from offset 0 (roughly equivalent - * to "xxd -g1"). - * - * "mem" mode shows the actual memory address, and will offset the start - * so that the low nibble of the address is always zero. - */ -enum HexDumpMode { kHexDumpLocal, kHexDumpMem }; -void printHexDump(const void* vaddr, size_t length); -void printHexDump2(const void* vaddr, size_t length, const char* prefix); -void printHexDumpEx(FILE* fp, const void* vaddr, size_t length, - HexDumpMode mode, const char* prefix); - -#endif // ART_JDWPSPY_COMMON_H_ diff --git a/jdwpspy/Main.cpp b/jdwpspy/Main.cpp deleted file mode 100644 index 0f68d52c38..0000000000 --- a/jdwpspy/Main.cpp +++ /dev/null @@ -1,139 +0,0 @@ -/* - * Copyright 2006 The Android Open Source Project - * - * JDWP spy. - */ -#define _JDWP_MISC_INLINE -#include "Common.h" -#include <stdlib.h> -#include <stdio.h> -#include <string.h> -#include <assert.h> -#include <ctype.h> - -static const char gHexDigit[] = "0123456789abcdef"; - -/* - * Print a hex dump. Just hands control off to the fancy version. 
- */ -void printHexDump(const void* vaddr, size_t length) -{ - printHexDumpEx(stdout, vaddr, length, kHexDumpLocal, ""); -} -void printHexDump2(const void* vaddr, size_t length, const char* prefix) -{ - printHexDumpEx(stdout, vaddr, length, kHexDumpLocal, prefix); -} - -/* - * Print a hex dump in this format: - * -01234567: 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff 0123456789abcdef\n - */ -void printHexDumpEx(FILE* fp, const void* vaddr, size_t length, - HexDumpMode mode, const char* prefix) -{ - const unsigned char* addr = reinterpret_cast<const unsigned char*>(vaddr); - char out[77]; /* exact fit */ - unsigned int offset; /* offset to show while printing */ - char* hex; - char* asc; - int gap; - - if (mode == kHexDumpLocal) - offset = 0; - else - offset = (int) addr; - - memset(out, ' ', sizeof(out)-1); - out[8] = ':'; - out[sizeof(out)-2] = '\n'; - out[sizeof(out)-1] = '\0'; - - gap = (int) offset & 0x0f; - while (length) { - unsigned int lineOffset = offset & ~0x0f; - char* hex = out; - char* asc = out + 59; - - for (int i = 0; i < 8; i++) { - *hex++ = gHexDigit[lineOffset >> 28]; - lineOffset <<= 4; - } - hex++; - hex++; - - int count = ((int)length > 16-gap) ? 16-gap : (int) length; /* cap length */ - assert(count != 0); - assert(count+gap <= 16); - - if (gap) { - /* only on first line */ - hex += gap * 3; - asc += gap; - } - - int i; - for (i = gap ; i < count+gap; i++) { - *hex++ = gHexDigit[*addr >> 4]; - *hex++ = gHexDigit[*addr & 0x0f]; - hex++; - if (isprint(*addr)) - *asc++ = *addr; - else - *asc++ = '.'; - addr++; - } - for ( ; i < 16; i++) { - /* erase extra stuff; only happens on last line */ - *hex++ = ' '; - *hex++ = ' '; - hex++; - *asc++ = ' '; - } - - fprintf(fp, "%s%s", prefix, out); - - gap = 0; - length -= count; - offset += count; - } -} - - -/* - * Explain it. 
- */ -static void usage(const char* progName) -{ - fprintf(stderr, "Usage: %s VM-port [debugger-listen-port]\n\n", progName); - fprintf(stderr, -"When a debugger connects to the debugger-listen-port, jdwpspy will connect\n"); - fprintf(stderr, "to the VM on the VM-port.\n"); -} - -/* - * Parse args. - */ -int main(int argc, char* argv[]) -{ - if (argc < 2 || argc > 3) { - usage("jdwpspy"); - return 2; - } - - setvbuf(stdout, NULL, _IONBF, 0); - - /* may want this to be host:port */ - int connectPort = atoi(argv[1]); - - int listenPort; - if (argc > 2) - listenPort = atoi(argv[2]); - else - listenPort = connectPort + 1; - - int cc = run("localhost", connectPort, listenPort); - - return (cc != 0); -} diff --git a/jdwpspy/Net.cpp b/jdwpspy/Net.cpp deleted file mode 100644 index 38d4e260af..0000000000 --- a/jdwpspy/Net.cpp +++ /dev/null @@ -1,751 +0,0 @@ -/* - * Copyright 2006 The Android Open Source Project - * - * JDWP spy. This is a rearranged version of the JDWP code from the VM. - */ -#include "Common.h" -#include "jdwp/jdwp_constants.h" - -#include <stdlib.h> -#include <unistd.h> -#include <stdio.h> -#include <string.h> -#include <sys/types.h> -#include <sys/socket.h> -#include <netinet/in.h> -#include <netinet/tcp.h> -#include <arpa/inet.h> -#include <netdb.h> -#include <time.h> -#include <errno.h> -#include <assert.h> - -#include <iostream> -#include <sstream> - -#define kInputBufferSize (256*1024) - -#define kMagicHandshakeLen 14 /* "JDWP-Handshake" */ -#define kJDWPHeaderLen 11 -#define kJDWPFlagReply 0x80 - - -/* - * Information about the remote end. - */ -struct Peer { - char label[2]; /* 'D' or 'V' */ - - int sock; - unsigned char inputBuffer[kInputBufferSize]; - int inputCount; - - bool awaitingHandshake; /* waiting for "JDWP-Handshake" */ -}; - - -/* - * Network state. 
- */ -struct NetState { - /* listen here for connection from debugger */ - int listenSock; - - /* connect here to contact VM */ - in_addr vmAddr; - uint16_t vmPort; - - Peer dbg; - Peer vm; -}; - -/* - * Function names. - */ -struct JdwpHandlerMap { - u1 cmdSet; - u1 cmd; - const char* descr; -}; - -/* - * Map commands to names. - * - * Command sets 0-63 are incoming requests, 64-127 are outbound requests, - * and 128-256 are vendor-defined. - */ -static const JdwpHandlerMap gHandlerMap[] = { - /* VirtualMachine command set (1) */ - { 1, 1, "VirtualMachine.Version" }, - { 1, 2, "VirtualMachine.ClassesBySignature" }, - { 1, 3, "VirtualMachine.AllClasses" }, - { 1, 4, "VirtualMachine.AllThreads" }, - { 1, 5, "VirtualMachine.TopLevelThreadGroups" }, - { 1, 6, "VirtualMachine.Dispose" }, - { 1, 7, "VirtualMachine.IDSizes" }, - { 1, 8, "VirtualMachine.Suspend" }, - { 1, 9, "VirtualMachine.Resume" }, - { 1, 10, "VirtualMachine.Exit" }, - { 1, 11, "VirtualMachine.CreateString" }, - { 1, 12, "VirtualMachine.Capabilities" }, - { 1, 13, "VirtualMachine.ClassPaths" }, - { 1, 14, "VirtualMachine.DisposeObjects" }, - { 1, 15, "VirtualMachine.HoldEvents" }, - { 1, 16, "VirtualMachine.ReleaseEvents" }, - { 1, 17, "VirtualMachine.CapabilitiesNew" }, - { 1, 18, "VirtualMachine.RedefineClasses" }, - { 1, 19, "VirtualMachine.SetDefaultStratum" }, - { 1, 20, "VirtualMachine.AllClassesWithGeneric"}, - { 1, 21, "VirtualMachine.InstanceCounts"}, - - /* ReferenceType command set (2) */ - { 2, 1, "ReferenceType.Signature" }, - { 2, 2, "ReferenceType.ClassLoader" }, - { 2, 3, "ReferenceType.Modifiers" }, - { 2, 4, "ReferenceType.Fields" }, - { 2, 5, "ReferenceType.Methods" }, - { 2, 6, "ReferenceType.GetValues" }, - { 2, 7, "ReferenceType.SourceFile" }, - { 2, 8, "ReferenceType.NestedTypes" }, - { 2, 9, "ReferenceType.Status" }, - { 2, 10, "ReferenceType.Interfaces" }, - { 2, 11, "ReferenceType.ClassObject" }, - { 2, 12, "ReferenceType.SourceDebugExtension" }, - { 2, 13, 
"ReferenceType.SignatureWithGeneric" }, - { 2, 14, "ReferenceType.FieldsWithGeneric" }, - { 2, 15, "ReferenceType.MethodsWithGeneric" }, - { 2, 16, "ReferenceType.Instances" }, - { 2, 17, "ReferenceType.ClassFileVersion" }, - { 2, 18, "ReferenceType.ConstantPool" }, - - /* ClassType command set (3) */ - { 3, 1, "ClassType.Superclass" }, - { 3, 2, "ClassType.SetValues" }, - { 3, 3, "ClassType.InvokeMethod" }, - { 3, 4, "ClassType.NewInstance" }, - - /* ArrayType command set (4) */ - { 4, 1, "ArrayType.NewInstance" }, - - /* InterfaceType command set (5) */ - - /* Method command set (6) */ - { 6, 1, "Method.LineTable" }, - { 6, 2, "Method.VariableTable" }, - { 6, 3, "Method.Bytecodes" }, - { 6, 4, "Method.IsObsolete" }, - { 6, 5, "Method.VariableTableWithGeneric" }, - - /* Field command set (8) */ - - /* ObjectReference command set (9) */ - { 9, 1, "ObjectReference.ReferenceType" }, - { 9, 2, "ObjectReference.GetValues" }, - { 9, 3, "ObjectReference.SetValues" }, - { 9, 4, "ObjectReference.UNUSED" }, - { 9, 5, "ObjectReference.MonitorInfo" }, - { 9, 6, "ObjectReference.InvokeMethod" }, - { 9, 7, "ObjectReference.DisableCollection" }, - { 9, 8, "ObjectReference.EnableCollection" }, - { 9, 9, "ObjectReference.IsCollected" }, - { 9, 10, "ObjectReference.ReferringObjects" }, - - /* StringReference command set (10) */ - { 10, 1, "StringReference.Value" }, - - /* ThreadReference command set (11) */ - { 11, 1, "ThreadReference.Name" }, - { 11, 2, "ThreadReference.Suspend" }, - { 11, 3, "ThreadReference.Resume" }, - { 11, 4, "ThreadReference.Status" }, - { 11, 5, "ThreadReference.ThreadGroup" }, - { 11, 6, "ThreadReference.Frames" }, - { 11, 7, "ThreadReference.FrameCount" }, - { 11, 8, "ThreadReference.OwnedMonitors" }, - { 11, 9, "ThreadReference.CurrentContendedMonitor" }, - { 11, 10, "ThreadReference.Stop" }, - { 11, 11, "ThreadReference.Interrupt" }, - { 11, 12, "ThreadReference.SuspendCount" }, - { 11, 13, "ThreadReference.OwnedMonitorsStackDepthInfo" }, - { 11, 14, 
"ThreadReference.ForceEarlyReturn" }, - - /* ThreadGroupReference command set (12) */ - { 12, 1, "ThreadGroupReference.Name" }, - { 12, 2, "ThreadGroupReference.Parent" }, - { 12, 3, "ThreadGroupReference.Children" }, - - /* ArrayReference command set (13) */ - { 13, 1, "ArrayReference.Length" }, - { 13, 2, "ArrayReference.GetValues" }, - { 13, 3, "ArrayReference.SetValues" }, - - /* ClassLoaderReference command set (14) */ - { 14, 1, "ArrayReference.VisibleClasses" }, - - /* EventRequest command set (15) */ - { 15, 1, "EventRequest.Set" }, - { 15, 2, "EventRequest.Clear" }, - { 15, 3, "EventRequest.ClearAllBreakpoints" }, - - /* StackFrame command set (16) */ - { 16, 1, "StackFrame.GetValues" }, - { 16, 2, "StackFrame.SetValues" }, - { 16, 3, "StackFrame.ThisObject" }, - { 16, 4, "StackFrame.PopFrames" }, - - /* ClassObjectReference command set (17) */ - { 17, 1, "ClassObjectReference.ReflectedType" }, - - /* Event command set (64) */ - { 64, 100, "Event.Composite" }, - - /* DDMS */ - { 199, 1, "DDMS.Chunk" }, -}; - -/* - * Look up a command's name. - */ -static const char* getCommandName(int cmdSet, int cmd) -{ - for (int i = 0; i < (int) NELEM(gHandlerMap); i++) { - if (gHandlerMap[i].cmdSet == cmdSet && - gHandlerMap[i].cmd == cmd) - { - return gHandlerMap[i].descr; - } - } - - return "?UNKNOWN?"; -} - - -void jdwpNetFree(NetState* netState); /* fwd */ - -/* - * Allocate state structure and bind to the listen port. - * - * Returns 0 on success. - */ -NetState* jdwpNetStartup(uint16_t listenPort, const char* connectHost, uint16_t connectPort) { - NetState* netState = new NetState; - memset(netState, 0, sizeof(*netState)); - netState->listenSock = -1; - netState->dbg.sock = netState->vm.sock = -1; - - strcpy(netState->dbg.label, "D"); - strcpy(netState->vm.label, "V"); - - /* - * Set up a socket to listen for connections from the debugger. 
- */ - - netState->listenSock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); - if (netState->listenSock < 0) { - fprintf(stderr, "Socket create failed: %s\n", strerror(errno)); - goto fail; - } - - /* allow immediate re-use if we die */ - { - int one = 1; - if (setsockopt(netState->listenSock, SOL_SOCKET, SO_REUSEADDR, &one, - sizeof(one)) < 0) - { - fprintf(stderr, "setsockopt(SO_REUSEADDR) failed: %s\n", - strerror(errno)); - goto fail; - } - } - - sockaddr_in addr; - addr.sin_family = AF_INET; - addr.sin_port = htons(listenPort); - addr.sin_addr.s_addr = INADDR_ANY; - - if (bind(netState->listenSock, (sockaddr*) &addr, sizeof(addr)) != 0) - { - fprintf(stderr, "attempt to bind to port %u failed: %s\n", - listenPort, strerror(errno)); - goto fail; - } - - fprintf(stderr, "+++ bound to port %u\n", listenPort); - - if (listen(netState->listenSock, 5) != 0) { - fprintf(stderr, "Listen failed: %s\n", strerror(errno)); - goto fail; - } - - /* - * Do the hostname lookup for the VM. - */ - hostent* pHost; - - pHost = gethostbyname(connectHost); - if (pHost == NULL) { - fprintf(stderr, "Name lookup of '%s' failed: %s\n", - connectHost, strerror(h_errno)); - goto fail; - } - - netState->vmAddr = *((in_addr*) pHost->h_addr_list[0]); - netState->vmPort = connectPort; - - fprintf(stderr, "+++ connect host resolved to %s\n", - inet_ntoa(netState->vmAddr)); - - return netState; - -fail: - jdwpNetFree(netState); - return NULL; -} - -/* - * Shut down JDWP listener. Don't free state. - * - * Note that "netState" may be partially initialized if "startup" failed. 
- */ -void jdwpNetShutdown(NetState* netState) -{ - int listenSock = netState->listenSock; - int dbgSock = netState->dbg.sock; - int vmSock = netState->vm.sock; - - /* clear these out so it doesn't wake up and try to reuse them */ - /* (important when multi-threaded) */ - netState->listenSock = netState->dbg.sock = netState->vm.sock = -1; - - if (listenSock >= 0) { - shutdown(listenSock, SHUT_RDWR); - close(listenSock); - } - if (dbgSock >= 0) { - shutdown(dbgSock, SHUT_RDWR); - close(dbgSock); - } - if (vmSock >= 0) { - shutdown(vmSock, SHUT_RDWR); - close(vmSock); - } -} - -/* - * Shut down JDWP listener and free its state. - */ -void jdwpNetFree(NetState* netState) -{ - if (netState == NULL) - return; - - jdwpNetShutdown(netState); - delete netState; -} - -/* - * Disable the TCP Nagle algorithm, which delays transmission of outbound - * packets until the previous transmissions have been acked. JDWP does a - * lot of back-and-forth with small packets, so this may help. - */ -static int setNoDelay(int fd) -{ - int cc, on = 1; - - cc = setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)); - assert(cc == 0); - return cc; -} - -/* - * Accept a connection. This will block waiting for somebody to show up. - */ -bool jdwpAcceptConnection(NetState* netState) -{ - sockaddr_in addr; - socklen_t addrlen; - int sock; - - if (netState->listenSock < 0) - return false; /* you're not listening! 
*/ - - assert(netState->dbg.sock < 0); /* must not already be talking */ - - addrlen = sizeof(addr); - do { - sock = accept(netState->listenSock, (sockaddr*) &addr, &addrlen); - if (sock < 0 && errno != EINTR) { - fprintf(stderr, "accept failed: %s\n", strerror(errno)); - return false; - } - } while (sock < 0); - - fprintf(stderr, "+++ accepted connection from %s:%u\n", - inet_ntoa(addr.sin_addr), ntohs(addr.sin_port)); - - netState->dbg.sock = sock; - netState->dbg.awaitingHandshake = true; - netState->dbg.inputCount = 0; - - setNoDelay(sock); - - return true; -} - -/* - * Close the connections to the debugger and VM. - * - * Reset the state so we're ready to receive a new connection. - */ -void jdwpCloseConnection(NetState* netState) -{ - if (netState->dbg.sock >= 0) { - fprintf(stderr, "+++ closing connection to debugger\n"); - close(netState->dbg.sock); - netState->dbg.sock = -1; - } - if (netState->vm.sock >= 0) { - fprintf(stderr, "+++ closing connection to vm\n"); - close(netState->vm.sock); - netState->vm.sock = -1; - } -} - -/* - * Figure out if we have a full packet in the buffer. - */ -static bool haveFullPacket(Peer* pPeer) -{ - long length; - - if (pPeer->awaitingHandshake) - return (pPeer->inputCount >= kMagicHandshakeLen); - - if (pPeer->inputCount < 4) - return false; - - length = get4BE(pPeer->inputBuffer); - return (pPeer->inputCount >= length); -} - -/* - * Consume bytes from the buffer. - * - * This would be more efficient with a circular buffer. However, we're - * usually only going to find one packet, which is trivial to handle. - */ -static void consumeBytes(Peer* pPeer, int count) -{ - assert(count > 0); - assert(count <= pPeer->inputCount); - - if (count == pPeer->inputCount) { - pPeer->inputCount = 0; - return; - } - - memmove(pPeer->inputBuffer, pPeer->inputBuffer + count, - pPeer->inputCount - count); - pPeer->inputCount -= count; -} - -/* - * Get the current time. 
- */ -static void getCurrentTime(int* pMin, int* pSec) -{ - time_t now; - tm* ptm; - - now = time(NULL); - ptm = localtime(&now); - *pMin = ptm->tm_min; - *pSec = ptm->tm_sec; -} - -/* - * Dump the contents of a packet to stdout. - */ -static void dumpPacket(const unsigned char* packetBuf, const char* srcName, - const char* dstName) -{ - const unsigned char* buf = packetBuf; - char prefix[3]; - u4 length, id; - u1 flags, cmdSet=0, cmd=0; - art::JDWP::JdwpError error = art::JDWP::ERR_NONE; - bool reply; - int dataLen; - - length = get4BE(buf+0); - id = get4BE(buf+4); - flags = get1(buf+8); - if ((flags & kJDWPFlagReply) != 0) { - reply = true; - error = static_cast<art::JDWP::JdwpError>(get2BE(buf+9)); - } else { - reply = false; - cmdSet = get1(buf+9); - cmd = get1(buf+10); - } - - buf += kJDWPHeaderLen; - dataLen = length - (buf - packetBuf); - - if (!reply) { - prefix[0] = srcName[0]; - prefix[1] = '>'; - } else { - prefix[0] = dstName[0]; - prefix[1] = '<'; - } - prefix[2] = '\0'; - - int min, sec; - getCurrentTime(&min, &sec); - - if (!reply) { - printf("%s REQUEST dataLen=%-5u id=0x%08x flags=0x%02x cmd=%d/%d [%02d:%02d]\n", - prefix, dataLen, id, flags, cmdSet, cmd, min, sec); - printf("%s --> %s\n", prefix, getCommandName(cmdSet, cmd)); - } else { - std::ostringstream ss; - ss << "TODO"; // get access to the operator<<, or regenerate it for jdwpspy? - printf("%s REPLY dataLen=%-5u id=0x%08x flags=0x%02x err=%d (%s) [%02d:%02d]\n", - prefix, dataLen, id, flags, error, ss.str().c_str(), min,sec); - } - if (dataLen > 0) - printHexDump2(buf, dataLen, prefix); - printf("%s ----------\n", prefix); -} - -/* - * Handle a packet. Returns "false" if we encounter a connection-fatal error. 
- */ -static bool handlePacket(Peer* pDst, Peer* pSrc) -{ - const unsigned char* buf = pSrc->inputBuffer; - u4 length; - u1 flags; - int cc; - - length = get4BE(buf+0); - flags = get1(buf+9); - - assert((int) length <= pSrc->inputCount); - - dumpPacket(buf, pSrc->label, pDst->label); - - cc = write(pDst->sock, buf, length); - if (cc != (int) length) { - fprintf(stderr, "Failed sending packet: %s\n", strerror(errno)); - return false; - } - /*printf("*** wrote %d bytes from %c to %c\n", - cc, pSrc->label[0], pDst->label[0]);*/ - - consumeBytes(pSrc, length); - return true; -} - -/* - * Handle incoming data. If we have a full packet in the buffer, process it. - */ -static bool handleIncoming(Peer* pWritePeer, Peer* pReadPeer) -{ - if (haveFullPacket(pReadPeer)) { - if (pReadPeer->awaitingHandshake) { - printf("Handshake [%c]: %.14s\n", - pReadPeer->label[0], pReadPeer->inputBuffer); - if (write(pWritePeer->sock, pReadPeer->inputBuffer, - kMagicHandshakeLen) != kMagicHandshakeLen) - { - fprintf(stderr, - "+++ [%c] handshake write failed\n", pReadPeer->label[0]); - goto fail; - } - consumeBytes(pReadPeer, kMagicHandshakeLen); - pReadPeer->awaitingHandshake = false; - } else { - if (!handlePacket(pWritePeer, pReadPeer)) - goto fail; - } - } else { - /*printf("*** %c not full yet\n", pReadPeer->label[0]);*/ - } - - return true; - -fail: - return false; -} - -/* - * Process incoming data. If no data is available, this will block until - * some arrives. - * - * Returns "false" on error (indicating that the connection has been severed). - */ -bool jdwpProcessIncoming(NetState* netState) -{ - int cc; - - assert(netState->dbg.sock >= 0); - assert(netState->vm.sock >= 0); - - while (!haveFullPacket(&netState->dbg) && !haveFullPacket(&netState->vm)) { - /* read some more */ - int highFd; - fd_set readfds; - - highFd = (netState->dbg.sock > netState->vm.sock) ? 
- netState->dbg.sock+1 : netState->vm.sock+1; - FD_ZERO(&readfds); - FD_SET(netState->dbg.sock, &readfds); - FD_SET(netState->vm.sock, &readfds); - - errno = 0; - cc = select(highFd, &readfds, NULL, NULL, NULL); - if (cc < 0) { - if (errno == EINTR) { - fprintf(stderr, "+++ EINTR on select\n"); - continue; - } - fprintf(stderr, "+++ select failed: %s\n", strerror(errno)); - goto fail; - } - - if (FD_ISSET(netState->dbg.sock, &readfds)) { - cc = read(netState->dbg.sock, - netState->dbg.inputBuffer + netState->dbg.inputCount, - sizeof(netState->dbg.inputBuffer) - netState->dbg.inputCount); - if (cc < 0) { - if (errno == EINTR) { - fprintf(stderr, "+++ EINTR on read\n"); - continue; - } - fprintf(stderr, "+++ dbg read failed: %s\n", strerror(errno)); - goto fail; - } - if (cc == 0) { - if (sizeof(netState->dbg.inputBuffer) == - netState->dbg.inputCount) - fprintf(stderr, "+++ debugger sent huge message\n"); - else - fprintf(stderr, "+++ debugger disconnected\n"); - goto fail; - } - - /*printf("*** %d bytes from dbg\n", cc);*/ - netState->dbg.inputCount += cc; - } - - if (FD_ISSET(netState->vm.sock, &readfds)) { - cc = read(netState->vm.sock, - netState->vm.inputBuffer + netState->vm.inputCount, - sizeof(netState->vm.inputBuffer) - netState->vm.inputCount); - if (cc < 0) { - if (errno == EINTR) { - fprintf(stderr, "+++ EINTR on read\n"); - continue; - } - fprintf(stderr, "+++ vm read failed: %s\n", strerror(errno)); - goto fail; - } - if (cc == 0) { - if (sizeof(netState->vm.inputBuffer) == - netState->vm.inputCount) - fprintf(stderr, "+++ vm sent huge message\n"); - else - fprintf(stderr, "+++ vm disconnected\n"); - goto fail; - } - - /*printf("*** %d bytes from vm\n", cc);*/ - netState->vm.inputCount += cc; - } - } - - if (!handleIncoming(&netState->dbg, &netState->vm)) - goto fail; - if (!handleIncoming(&netState->vm, &netState->dbg)) - goto fail; - - return true; - -fail: - jdwpCloseConnection(netState); - return false; -} - -/* - * Connect to the VM. 
- */ -bool jdwpConnectToVm(NetState* netState) -{ - sockaddr_in addr; - int sock = -1; - - sock = socket(PF_INET, SOCK_STREAM, IPPROTO_TCP); - if (sock < 0) { - fprintf(stderr, "Socket create failed: %s\n", strerror(errno)); - goto fail; - } - - addr.sin_family = AF_INET; - addr.sin_addr = netState->vmAddr; - addr.sin_port = htons(netState->vmPort); - if (connect(sock, (struct sockaddr*) &addr, sizeof(addr)) != 0) { - fprintf(stderr, "Connection to %s:%u failed: %s\n", - inet_ntoa(addr.sin_addr), ntohs(addr.sin_port), strerror(errno)); - goto fail; - } - fprintf(stderr, "+++ connected to VM %s:%u\n", - inet_ntoa(addr.sin_addr), ntohs(addr.sin_port)); - - netState->vm.sock = sock; - netState->vm.awaitingHandshake = true; - netState->vm.inputCount = 0; - - setNoDelay(netState->vm.sock); - return true; - -fail: - if (sock >= 0) - close(sock); - return false; -} - -/* - * Establish network connections and start things running. - * - * We wait for a new connection from the debugger. When one arrives we - * open a connection to the VM. If one side or the other goes away, we - * drop both ends and go back to listening. 
- */ -int run(const char* connectHost, int connectPort, int listenPort) -{ - NetState* state; - - state = jdwpNetStartup(listenPort, connectHost, connectPort); - if (state == NULL) - return -1; - - while (true) { - if (!jdwpAcceptConnection(state)) - break; - - if (jdwpConnectToVm(state)) { - while (true) { - if (!jdwpProcessIncoming(state)) - break; - } - } - - jdwpCloseConnection(state); - } - - jdwpNetFree(state); - - return 0; -} diff --git a/runtime/Android.mk b/runtime/Android.mk index 9e6d5f9e28..bab250c16d 100644 --- a/runtime/Android.mk +++ b/runtime/Android.mk @@ -23,6 +23,7 @@ LIBART_COMMON_SRC_FILES := \ barrier.cc \ base/allocator.cc \ base/bit_vector.cc \ + base/hex_dump.cc \ base/logging.cc \ base/mutex.cc \ base/stringpiece.cc \ @@ -65,6 +66,7 @@ LIBART_COMMON_SRC_FILES := \ gc/space/malloc_space.cc \ gc/space/rosalloc_space.cc \ gc/space/space.cc \ + gc/space/zygote_space.cc \ hprof/hprof.cc \ image.cc \ indirect_reference_table.cc \ diff --git a/runtime/arch/quick_alloc_entrypoints.S b/runtime/arch/quick_alloc_entrypoints.S index 4fdcb35930..632c5f372a 100644 --- a/runtime/arch/quick_alloc_entrypoints.S +++ b/runtime/arch/quick_alloc_entrypoints.S @@ -26,6 +26,8 @@ TWO_ARG_DOWNCALL art_quick_alloc_object_initialized\c_suffix, artAllocObjectFrom TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check\c_suffix, artAllocObjectFromCodeWithAccessCheck\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO // Called by managed code to allocate an array. THREE_ARG_DOWNCALL art_quick_alloc_array\c_suffix, artAllocArrayFromCode\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO +// Called by managed code to allocate an array of a resolve class. +THREE_ARG_DOWNCALL art_quick_alloc_array_resolved\c_suffix, artAllocArrayFromCodeResolved\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO // Called by managed code to allocate an array when the caller doesn't know whether it has access // to the created type. 
THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check\c_suffix, artAllocArrayFromCodeWithAccessCheck\cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO diff --git a/runtime/arch/quick_alloc_entrypoints.cc b/runtime/arch/quick_alloc_entrypoints.cc index 0fad82266c..9363f81cfe 100644 --- a/runtime/arch/quick_alloc_entrypoints.cc +++ b/runtime/arch/quick_alloc_entrypoints.cc @@ -19,6 +19,7 @@ #define GENERATE_ENTRYPOINTS(suffix) \ extern "C" void* art_quick_alloc_array##suffix(uint32_t, void*, int32_t); \ +extern "C" void* art_quick_alloc_array_resolved##suffix(void* klass, void*, int32_t); \ extern "C" void* art_quick_alloc_array_with_access_check##suffix(uint32_t, void*, int32_t); \ extern "C" void* art_quick_alloc_object##suffix(uint32_t type_idx, void* method); \ extern "C" void* art_quick_alloc_object_resolved##suffix(void* klass, void* method); \ @@ -27,6 +28,7 @@ extern "C" void* art_quick_alloc_object_with_access_check##suffix(uint32_t type_ extern "C" void* art_quick_check_and_alloc_array##suffix(uint32_t, void*, int32_t); \ extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix(uint32_t, void*, int32_t); \ extern "C" void* art_quick_alloc_array##suffix##_instrumented(uint32_t, void*, int32_t); \ +extern "C" void* art_quick_alloc_array_resolved##suffix##_instrumented(void* klass, void*, int32_t); \ extern "C" void* art_quick_alloc_array_with_access_check##suffix##_instrumented(uint32_t, void*, int32_t); \ extern "C" void* art_quick_alloc_object##suffix##_instrumented(uint32_t type_idx, void* method); \ extern "C" void* art_quick_alloc_object_resolved##suffix##_instrumented(void* klass, void* method); \ @@ -37,6 +39,7 @@ extern "C" void* art_quick_check_and_alloc_array_with_access_check##suffix##_ins void SetQuickAllocEntryPoints##suffix(QuickEntryPoints* qpoints, bool instrumented) { \ if (instrumented) { \ qpoints->pAllocArray = art_quick_alloc_array##suffix##_instrumented; \ + qpoints->pAllocArrayResolved = 
art_quick_alloc_array_resolved##suffix##_instrumented; \ qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix##_instrumented; \ qpoints->pAllocObject = art_quick_alloc_object##suffix##_instrumented; \ qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix##_instrumented; \ @@ -46,6 +49,7 @@ void SetQuickAllocEntryPoints##suffix(QuickEntryPoints* qpoints, bool instrument qpoints->pCheckAndAllocArrayWithAccessCheck = art_quick_check_and_alloc_array_with_access_check##suffix##_instrumented; \ } else { \ qpoints->pAllocArray = art_quick_alloc_array##suffix; \ + qpoints->pAllocArrayResolved = art_quick_alloc_array_resolved##suffix; \ qpoints->pAllocArrayWithAccessCheck = art_quick_alloc_array_with_access_check##suffix; \ qpoints->pAllocObject = art_quick_alloc_object##suffix; \ qpoints->pAllocObjectResolved = art_quick_alloc_object_resolved##suffix; \ diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index 48c7d8d5f3..74ec761f5b 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -413,6 +413,8 @@ END_MACRO TWO_ARG_DOWNCALL art_quick_alloc_object_with_access_check ## c_suffix, artAllocObjectFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO #define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(c_suffix, cxx_suffix) \ THREE_ARG_DOWNCALL art_quick_alloc_array ## c_suffix, artAllocArrayFromCode ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO +#define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(c_suffix, cxx_suffix) \ + THREE_ARG_DOWNCALL art_quick_alloc_array_resolved ## c_suffix, artAllocArrayFromCodeResolved ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO #define GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(c_suffix, cxx_suffix) \ THREE_ARG_DOWNCALL art_quick_alloc_array_with_access_check ## c_suffix, artAllocArrayFromCodeWithAccessCheck ## cxx_suffix, RETURN_IF_RESULT_IS_NON_ZERO #define 
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(c_suffix, cxx_suffix) \ @@ -425,6 +427,7 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc, DlMalloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc, DlMalloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_dlmalloc, DlMalloc) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_dlmalloc, DlMalloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc, DlMalloc) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc, DlMalloc) @@ -434,6 +437,7 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_dlmalloc_instrumented, DlMallo GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_dlmalloc_instrumented, DlMallocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_dlmalloc_instrumented, DlMallocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_dlmalloc_instrumented, DlMallocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_dlmalloc_instrumented, DlMallocInstrumented) @@ -443,6 +447,7 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc, RosAlloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc, RosAlloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc, RosAlloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc, RosAlloc) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc, RosAlloc) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc, RosAlloc) 
GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc, RosAlloc) @@ -452,6 +457,7 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_rosalloc_instrumented, RosAllo GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_rosalloc_instrumented, RosAllocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_rosalloc_instrumented, RosAllocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_rosalloc_instrumented, RosAllocInstrumented) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_rosalloc_instrumented, RosAllocInstrumented) @@ -461,6 +467,7 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer, BumpPointer) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer, BumpPointer) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_bump_pointer, BumpPointer) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_bump_pointer, BumpPointer) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer, BumpPointer) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer, BumpPointer) @@ -470,6 +477,7 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_bump_pointer_instrumented, Bum GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_bump_pointer_instrumented, BumpPointerInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented) 
+GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_bump_pointer_instrumented, BumpPointerInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_bump_pointer_instrumented, BumpPointerInstrumented) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_bump_pointer_instrumented, BumpPointerInstrumented) @@ -479,6 +487,7 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab, TLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab, TLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab, TLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab, TLAB) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab, TLAB) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab, TLAB) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab, TLAB) @@ -488,6 +497,7 @@ GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_RESOLVED(_tlab_instrumented, TLABInstrum GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_INITIALIZED(_tlab_instrumented, TLABInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_OBJECT_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented) +GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_RESOLVED(_tlab_instrumented, TLABInstrumented) GENERATE_ALLOC_ENTRYPOINTS_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY(_tlab_instrumented, TLABInstrumented) GENERATE_ALLOC_ENTRYPOINTS_CHECK_AND_ALLOC_ARRAY_WITH_ACCESS_CHECK(_tlab_instrumented, TLABInstrumented) diff --git a/runtime/base/hex_dump.cc b/runtime/base/hex_dump.cc new file mode 100644 index 0000000000..936c52b47a --- /dev/null +++ b/runtime/base/hex_dump.cc @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, 
Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "hex_dump.h" + +#include "globals.h" + +#include <string.h> + +namespace art { + +void HexDump::Dump(std::ostream& os) const { + if (byte_count_ == 0) { + return; + } + + if (address_ == NULL) { + os << "00000000:"; + return; + } + + static const char gHexDigit[] = "0123456789abcdef"; + const unsigned char* addr = reinterpret_cast<const unsigned char*>(address_); + // 01234560: 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff 0123456789abcdef + char out[(kBitsPerWord / 4) + /* offset */ + 1 + /* colon */ + (16 * 3) + /* 16 hex digits and space */ + 2 + /* white space */ + 16 + /* 16 characters*/ + 1 /* \0 */ ]; + size_t offset; /* offset to show while printing */ + + if (show_actual_addresses_) { + offset = reinterpret_cast<size_t>(addr); + } else { + offset = 0; + } + memset(out, ' ', sizeof(out)-1); + out[kBitsPerWord / 4] = ':'; + out[sizeof(out)-1] = '\0'; + + size_t byte_count = byte_count_; + size_t gap = offset & 0x0f; + while (byte_count > 0) { + size_t line_offset = offset & ~0x0f; + + char* hex = out; + char* asc = out + (kBitsPerWord / 4) + /* offset */ 1 + /* colon */ + (16 * 3) + /* 16 hex digits and space */ 2 /* white space */; + + for (int i = 0; i < (kBitsPerWord / 4); i++) { + *hex++ = gHexDigit[line_offset >> (kBitsPerWord - 4)]; + line_offset <<= 4; + } + hex++; + hex++; + + size_t count = std::min(byte_count, 16 - gap); + // CHECK_NE(count, 0U); + // CHECK_LE(count + gap, 16U); + + if (gap) { + /* only on first line 
*/ + hex += gap * 3; + asc += gap; + } + + size_t i; + for (i = gap ; i < count + gap; i++) { + *hex++ = gHexDigit[*addr >> 4]; + *hex++ = gHexDigit[*addr & 0x0f]; + hex++; + if (*addr >= 0x20 && *addr < 0x7f /*isprint(*addr)*/) { + *asc++ = *addr; + } else { + *asc++ = '.'; + } + addr++; + } + for (; i < 16; i++) { + /* erase extra stuff; only happens on last line */ + *hex++ = ' '; + *hex++ = ' '; + hex++; + *asc++ = ' '; + } + + os << prefix_ << out; + + gap = 0; + byte_count -= count; + offset += count; + if (byte_count > 0) { + os << "\n"; + } + } +} + +} // namespace art diff --git a/runtime/base/hex_dump.h b/runtime/base/hex_dump.h new file mode 100644 index 0000000000..8769ece6f9 --- /dev/null +++ b/runtime/base/hex_dump.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_BASE_HEX_DUMP_H_ +#define ART_RUNTIME_BASE_HEX_DUMP_H_ + +#include "macros.h" + +#include <ostream> + +namespace art { + +// Prints a hex dump in this format: +// +// 01234560: 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff 0123456789abcdef +// 01234568: 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff 0123456789abcdef +class HexDump { + public: + HexDump(const void* address, size_t byte_count, bool show_actual_addresses, const char* prefix) + : address_(address), byte_count_(byte_count), show_actual_addresses_(show_actual_addresses), + prefix_(prefix) { + } + + void Dump(std::ostream& os) const; + + private: + const void* const address_; + const size_t byte_count_; + const bool show_actual_addresses_; + const char* const prefix_; + + DISALLOW_COPY_AND_ASSIGN(HexDump); +}; + +inline std::ostream& operator<<(std::ostream& os, const HexDump& rhs) { + rhs.Dump(os); + return os; +} + +} // namespace art + +#endif // ART_RUNTIME_BASE_HEX_DUMP_H_ diff --git a/runtime/base/hex_dump_test.cc b/runtime/base/hex_dump_test.cc new file mode 100644 index 0000000000..d950961213 --- /dev/null +++ b/runtime/base/hex_dump_test.cc @@ -0,0 +1,63 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "hex_dump.h" + +#include "globals.h" + +#include "gtest/gtest.h" + +#include <stdint.h> + +namespace art { + +TEST(HexDump, OneLine) { + const char* test_text = "0123456789abcdef"; + std::ostringstream oss; + oss << HexDump(test_text, strlen(test_text), false, ""); + EXPECT_STREQ(oss.str().c_str(), + "00000000: 30 31 32 33 34 35 36 37 38 39 61 62 63 64 65 66 0123456789abcdef"); +} + +TEST(HexDump, MultiLine) { + const char* test_text = "0123456789abcdef0123456789ABCDEF"; + std::ostringstream oss; + oss << HexDump(test_text, strlen(test_text), false, ""); + EXPECT_STREQ(oss.str().c_str(), + "00000000: 30 31 32 33 34 35 36 37 38 39 61 62 63 64 65 66 0123456789abcdef\n" + "00000010: 30 31 32 33 34 35 36 37 38 39 41 42 43 44 45 46 0123456789ABCDEF"); +} + +uint64_t g16byte_aligned_number __attribute__ ((aligned(16))); // NOLINT(whitespace/parens) +TEST(HexDump, ShowActualAddresses) { + g16byte_aligned_number = 0x6162636465666768; + std::ostringstream oss; + oss << HexDump(&g16byte_aligned_number, 8, true, ""); + // Compare ignoring pointer. 
+ EXPECT_STREQ(oss.str().c_str() + (kBitsPerWord / 4), + ": 68 67 66 65 64 63 62 61 hgfedcba "); +} + +TEST(HexDump, Prefix) { + const char* test_text = "0123456789abcdef"; + std::ostringstream oss; + oss << HexDump(test_text, strlen(test_text), false, "test prefix: "); + EXPECT_STREQ(oss.str().c_str(), + "test prefix: 00000000: 30 31 32 33 34 35 36 37 38 39 61 62 63 64 65 66 " + "0123456789abcdef"); +} + +} // namespace art diff --git a/runtime/base/logging.cc b/runtime/base/logging.cc index 15554ac811..46b8ff28fe 100644 --- a/runtime/base/logging.cc +++ b/runtime/base/logging.cc @@ -161,97 +161,4 @@ LogMessage::~LogMessage() { } } -HexDump::HexDump(const void* address, size_t byte_count, bool show_actual_addresses) - : address_(address), byte_count_(byte_count), show_actual_addresses_(show_actual_addresses) { -} - -void HexDump::Dump(std::ostream& os) const { - if (byte_count_ == 0) { - return; - } - - if (address_ == NULL) { - os << "00000000:"; - return; - } - - static const char gHexDigit[] = "0123456789abcdef"; - const unsigned char* addr = reinterpret_cast<const unsigned char*>(address_); - // 01234560: 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff 0123456789abcdef - char out[(kBitsPerWord / 4) + /* offset */ - 1 + /* colon */ - (16 * 3) + /* 16 hex digits and space */ - 2 + /* white space */ - 16 + /* 16 characters*/ - 1 /* \0 */ ]; - size_t offset; /* offset to show while printing */ - - if (show_actual_addresses_) { - offset = reinterpret_cast<size_t>(addr); - } else { - offset = 0; - } - memset(out, ' ', sizeof(out)-1); - out[kBitsPerWord / 4] = ':'; - out[sizeof(out)-1] = '\0'; - - size_t byte_count = byte_count_; - size_t gap = offset & 0x0f; - while (byte_count) { - size_t line_offset = offset & ~0x0f; - - char* hex = out; - char* asc = out + (kBitsPerWord / 4) + /* offset */ 1 + /* colon */ - (16 * 3) + /* 16 hex digits and space */ 2 /* white space */; - - for (int i = 0; i < (kBitsPerWord / 4); i++) { - *hex++ = gHexDigit[line_offset >> 
(kBitsPerWord - 4)]; - line_offset <<= 4; - } - hex++; - hex++; - - size_t count = std::min(byte_count, 16 - gap); - CHECK_NE(count, 0U); - CHECK_LE(count + gap, 16U); - - if (gap) { - /* only on first line */ - hex += gap * 3; - asc += gap; - } - - size_t i; - for (i = gap ; i < count + gap; i++) { - *hex++ = gHexDigit[*addr >> 4]; - *hex++ = gHexDigit[*addr & 0x0f]; - hex++; - if (*addr >= 0x20 && *addr < 0x7f /*isprint(*addr)*/) { - *asc++ = *addr; - } else { - *asc++ = '.'; - } - addr++; - } - for (; i < 16; i++) { - /* erase extra stuff; only happens on last line */ - *hex++ = ' '; - *hex++ = ' '; - hex++; - *asc++ = ' '; - } - - os << out; - - gap = 0; - byte_count -= count; - offset += count; - } -} - -std::ostream& operator<<(std::ostream& os, const HexDump& rhs) { - rhs.Dump(os); - return os; -} - } // namespace art diff --git a/runtime/base/logging.h b/runtime/base/logging.h index 8e40da0112..075d571197 100644 --- a/runtime/base/logging.h +++ b/runtime/base/logging.h @@ -208,24 +208,6 @@ class LogMessage { DISALLOW_COPY_AND_ASSIGN(LogMessage); }; -// Prints a hex dump in this format: -// -// 01234560: 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff 0123456789abcdef -// 01234568: 00 11 22 33 44 55 66 77 88 99 aa bb cc dd ee ff 0123456789abcdef -class HexDump { - public: - HexDump(const void* address, size_t byte_count, bool show_actual_addresses = false); - void Dump(std::ostream& os) const; - - private: - const void* address_; - size_t byte_count_; - bool show_actual_addresses_; - - DISALLOW_COPY_AND_ASSIGN(HexDump); -}; -std::ostream& operator<<(std::ostream& os, const HexDump& rhs); - // A convenience to allow any class with a "Dump(std::ostream& os)" member function // but without an operator<< to be used as if it had an operator<<. 
Use like this: // diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index b5d9fdfbd9..344da3ff0b 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -199,14 +199,14 @@ void ClassLinker::InitFromCompiler(const std::vector<const DexFile*>& boot_class Thread* self = Thread::Current(); gc::Heap* heap = Runtime::Current()->GetHeap(); // The GC can't handle an object with a null class since we can't get the size of this object. - heap->IncrementDisableGC(self); + heap->IncrementDisableMovingGC(self); SirtRef<mirror::Class> java_lang_Class(self, down_cast<mirror::Class*>( heap->AllocNonMovableObject<true>(self, nullptr, sizeof(mirror::ClassClass)))); CHECK(java_lang_Class.get() != NULL); mirror::Class::SetClassClass(java_lang_Class.get()); java_lang_Class->SetClass(java_lang_Class.get()); java_lang_Class->SetClassSize(sizeof(mirror::ClassClass)); - heap->DecrementDisableGC(self); + heap->DecrementDisableMovingGC(self); // AllocClass(mirror::Class*) can now be used // Class[] is used for reflection support. diff --git a/runtime/common_test.h b/runtime/common_test.h index ee95d5bbbd..fce3f3f7c2 100644 --- a/runtime/common_test.h +++ b/runtime/common_test.h @@ -26,7 +26,7 @@ #include "../../external/icu4c/common/unicode/uvernum.h" #include "../compiler/dex/quick/dex_file_to_method_inliner_map.h" -#include "../compiler/dex/verified_methods_data.h" +#include "../compiler/dex/verification_results.h" #include "../compiler/driver/compiler_driver.h" #include "base/macros.h" #include "base/stl_util.h" @@ -160,11 +160,7 @@ class ScratchFile { #if defined(__arm__) - -#include <signal.h> -#include <asm/sigcontext.h> -#include <asm-generic/ucontext.h> - +#include <sys/ucontext.h> // A signal handler called when have an illegal instruction. 
We record the fact in // a global boolean and then increment the PC in the signal context to return to @@ -425,9 +421,9 @@ class CommonTest : public testing::Test { CompilerBackend compiler_backend = kQuick; #endif - verified_methods_data_.reset(new VerifiedMethodsData); + verification_results_.reset(new VerificationResults); method_inliner_map_.reset(compiler_backend == kQuick ? new DexFileToMethodInlinerMap : nullptr); - callbacks_.Reset(verified_methods_data_.get(), method_inliner_map_.get()); + callbacks_.Reset(verification_results_.get(), method_inliner_map_.get()); Runtime::Options options; options.push_back(std::make_pair("compilercallbacks", static_cast<CompilerCallbacks*>(&callbacks_))); options.push_back(std::make_pair("bootclasspath", &boot_class_path_)); @@ -474,7 +470,7 @@ class CommonTest : public testing::Test { } } class_linker_->FixupDexCaches(runtime_->GetResolutionMethod()); - compiler_driver_.reset(new CompilerDriver(verified_methods_data_.get(), + compiler_driver_.reset(new CompilerDriver(verification_results_.get(), method_inliner_map_.get(), compiler_backend, instruction_set, instruction_set_features, @@ -526,7 +522,7 @@ class CommonTest : public testing::Test { compiler_driver_.reset(); callbacks_.Reset(nullptr, nullptr); method_inliner_map_.reset(); - verified_methods_data_.reset(); + verification_results_.reset(); STLDeleteElements(&opened_dex_files_); Runtime::Current()->GetHeap()->VerifyHeap(); // Check for heap corruption after the test @@ -654,18 +650,18 @@ class CommonTest : public testing::Test { class TestCompilerCallbacks : public CompilerCallbacks { public: - TestCompilerCallbacks() : verified_methods_data_(nullptr), method_inliner_map_(nullptr) { } + TestCompilerCallbacks() : verification_results_(nullptr), method_inliner_map_(nullptr) { } - void Reset(VerifiedMethodsData* verified_methods_data, + void Reset(VerificationResults* verification_results, DexFileToMethodInlinerMap* method_inliner_map) { - verified_methods_data_ = 
verified_methods_data; + verification_results_ = verification_results; method_inliner_map_ = method_inliner_map; } virtual bool MethodVerified(verifier::MethodVerifier* verifier) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - CHECK(verified_methods_data_); - bool result = verified_methods_data_->ProcessVerifiedMethod(verifier); + CHECK(verification_results_); + bool result = verification_results_->ProcessVerifiedMethod(verifier); if (result && method_inliner_map_ != nullptr) { MethodReference ref = verifier->GetMethodReference(); method_inliner_map_->GetMethodInliner(ref.dex_file) @@ -674,11 +670,11 @@ class CommonTest : public testing::Test { return result; } virtual void ClassRejected(ClassReference ref) { - verified_methods_data_->AddRejectedClass(ref); + verification_results_->AddRejectedClass(ref); } private: - VerifiedMethodsData* verified_methods_data_; + VerificationResults* verification_results_; DexFileToMethodInlinerMap* method_inliner_map_; }; @@ -689,7 +685,7 @@ class CommonTest : public testing::Test { UniquePtr<Runtime> runtime_; // Owned by the runtime ClassLinker* class_linker_; - UniquePtr<VerifiedMethodsData> verified_methods_data_; + UniquePtr<VerificationResults> verification_results_; UniquePtr<DexFileToMethodInlinerMap> method_inliner_map_; TestCompilerCallbacks callbacks_; UniquePtr<CompilerDriver> compiler_driver_; diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h index 5ee750fa7f..f6e8ca3d64 100644 --- a/runtime/entrypoints/entrypoint_utils.h +++ b/runtime/entrypoints/entrypoint_utils.h @@ -232,6 +232,30 @@ ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx, return mirror::Array::Alloc<kInstrumented>(self, klass, component_count, allocator_type); } +template <bool kAccessCheck, bool kInstrumented> +ALWAYS_INLINE static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass, + mirror::ArtMethod* method, + int32_t component_count, + Thread* self, + 
gc::AllocatorType allocator_type) + NO_THREAD_SAFETY_ANALYSIS { + DCHECK(klass != nullptr); + if (UNLIKELY(component_count < 0)) { + ThrowNegativeArraySizeException(component_count); + return nullptr; // Failure + } + if (kAccessCheck) { + mirror::Class* referrer = method->GetDeclaringClass(); + if (UNLIKELY(!referrer->CanAccess(klass))) { + ThrowIllegalAccessErrorClass(referrer, klass); + return nullptr; // Failure + } + } + // No need to retry a slow-path allocation as the above code won't + // cause a GC or thread suspension. + return mirror::Array::Alloc<kInstrumented>(self, klass, component_count, allocator_type); +} + extern mirror::Array* CheckAndAllocArrayFromCode(uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, bool access_check, diff --git a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc index 5657092d8d..2e1b69d236 100644 --- a/runtime/entrypoints/quick/quick_alloc_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_alloc_entrypoints.cc @@ -56,6 +56,14 @@ extern "C" mirror::Array* artAllocArrayFromCode##suffix##suffix2( \ return AllocArrayFromCode<false, instrumented_bool>(type_idx, method, component_count, self, \ allocator_type); \ } \ +extern "C" mirror::Array* artAllocArrayFromCodeResolved##suffix##suffix2( \ + mirror::Class* klass, mirror::ArtMethod* method, int32_t component_count, Thread* self, \ + mirror::ArtMethod** sp) \ + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \ + FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \ + return AllocArrayFromCodeResolved<false, instrumented_bool>(klass, method, component_count, self, \ + allocator_type); \ +} \ extern "C" mirror::Array* artAllocArrayFromCodeWithAccessCheck##suffix##suffix2( \ uint32_t type_idx, mirror::ArtMethod* method, int32_t component_count, Thread* self, \ mirror::ArtMethod** sp) \ diff --git a/runtime/entrypoints/quick/quick_entrypoints.h 
b/runtime/entrypoints/quick/quick_entrypoints.h index bbbc8f24e0..011e92693e 100644 --- a/runtime/entrypoints/quick/quick_entrypoints.h +++ b/runtime/entrypoints/quick/quick_entrypoints.h @@ -40,6 +40,7 @@ class Thread; struct PACKED(4) QuickEntryPoints { // Alloc void* (*pAllocArray)(uint32_t, void*, int32_t); + void* (*pAllocArrayResolved)(void*, void*, int32_t); void* (*pAllocArrayWithAccessCheck)(uint32_t, void*, int32_t); void* (*pAllocObject)(uint32_t, void*); void* (*pAllocObjectResolved)(void*, void*); diff --git a/runtime/gc/accounting/space_bitmap.cc b/runtime/gc/accounting/space_bitmap.cc index 99800fcccb..b831843e61 100644 --- a/runtime/gc/accounting/space_bitmap.cc +++ b/runtime/gc/accounting/space_bitmap.cc @@ -128,9 +128,9 @@ void SpaceBitmap::Walk(SpaceBitmap::Callback* callback, void* arg) { // // The callback is not permitted to increase the max of either bitmap. void SpaceBitmap::SweepWalk(const SpaceBitmap& live_bitmap, - const SpaceBitmap& mark_bitmap, - uintptr_t sweep_begin, uintptr_t sweep_end, - SpaceBitmap::SweepCallback* callback, void* arg) { + const SpaceBitmap& mark_bitmap, + uintptr_t sweep_begin, uintptr_t sweep_end, + SpaceBitmap::SweepCallback* callback, void* arg) { CHECK(live_bitmap.bitmap_begin_ != NULL); CHECK(mark_bitmap.bitmap_begin_ != NULL); CHECK_EQ(live_bitmap.heap_begin_, mark_bitmap.heap_begin_); diff --git a/runtime/gc/collector/mark_sweep.cc b/runtime/gc/collector/mark_sweep.cc index 5d450a7d85..862d06f977 100644 --- a/runtime/gc/collector/mark_sweep.cc +++ b/runtime/gc/collector/mark_sweep.cc @@ -89,7 +89,8 @@ constexpr bool kCheckLocks = kDebugLocking; void MarkSweep::ImmuneSpace(space::ContinuousSpace* space) { // Bind live to mark bitmap if necessary. if (space->GetLiveBitmap() != space->GetMarkBitmap()) { - BindLiveToMarkBitmap(space); + CHECK(space->IsContinuousMemMapAllocSpace()); + space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap(); } // Add the space to the immune region. 
@@ -143,11 +144,6 @@ MarkSweep::MarkSweep(Heap* heap, bool is_concurrent, const std::string& name_pre mark_stack_(NULL), immune_begin_(NULL), immune_end_(NULL), - soft_reference_list_(NULL), - weak_reference_list_(NULL), - finalizer_reference_list_(NULL), - phantom_reference_list_(NULL), - cleared_reference_list_(NULL), live_stack_freeze_size_(0), gc_barrier_(new Barrier(0)), large_object_lock_("mark sweep large object lock", kMarkSweepLargeObjectLock), @@ -161,11 +157,6 @@ void MarkSweep::InitializePhase() { mark_stack_ = heap_->mark_stack_.get(); DCHECK(mark_stack_ != nullptr); SetImmuneRange(nullptr, nullptr); - soft_reference_list_ = nullptr; - weak_reference_list_ = nullptr; - finalizer_reference_list_ = nullptr; - phantom_reference_list_ = nullptr; - cleared_reference_list_ = nullptr; class_count_ = 0; array_count_ = 0; other_count_ = 0; @@ -347,7 +338,8 @@ void MarkSweep::ReclaimPhase() { timings_.EndSplit(); // Unbind the live and mark bitmaps. - UnBindBitmaps(); + TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_); + GetHeap()->UnBindBitmaps(); } } @@ -589,14 +581,6 @@ void MarkSweep::MarkConcurrentRoots() { timings_.EndSplit(); } -void MarkSweep::BindLiveToMarkBitmap(space::ContinuousSpace* space) { - CHECK(space->IsMallocSpace()); - space::MallocSpace* alloc_space = space->AsMallocSpace(); - accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); - accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap(); - GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap); -} - class ScanObjectVisitor { public: explicit ScanObjectVisitor(MarkSweep* const mark_sweep) ALWAYS_INLINE @@ -893,14 +877,8 @@ class RecursiveMarkTask : public MarkStackTask<false> { // recursively marks until the mark stack is emptied. void MarkSweep::RecursiveMark() { TimingLogger::ScopedSplit split("RecursiveMark", &timings_); - // RecursiveMark will build the lists of known instances of the Reference classes. 
- // See DelayReferenceReferent for details. - CHECK(soft_reference_list_ == NULL); - CHECK(weak_reference_list_ == NULL); - CHECK(finalizer_reference_list_ == NULL); - CHECK(phantom_reference_list_ == NULL); - CHECK(cleared_reference_list_ == NULL); - + // RecursiveMark will build the lists of known instances of the Reference classes. See + // DelayReferenceReferent for details. if (kUseRecursiveMark) { const bool partial = GetGcType() == kGcTypePartial; ScanObjectVisitor scan_visitor(this); @@ -1146,13 +1124,13 @@ void MarkSweep::Sweep(bool swap_bitmaps) { DCHECK(mark_stack_->IsEmpty()); TimingLogger::ScopedSplit("Sweep", &timings_); for (const auto& space : GetHeap()->GetContinuousSpaces()) { - if (space->IsMallocSpace()) { - space::MallocSpace* malloc_space = space->AsMallocSpace(); + if (space->IsContinuousMemMapAllocSpace()) { + space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace(); TimingLogger::ScopedSplit split( - malloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_); + alloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepMallocSpace", &timings_); size_t freed_objects = 0; size_t freed_bytes = 0; - malloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes); + alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes); heap_->RecordFree(freed_objects, freed_bytes); freed_objects_.FetchAndAdd(freed_objects); freed_bytes_.FetchAndAdd(freed_bytes); @@ -1278,23 +1256,6 @@ inline bool MarkSweep::IsMarked(const Object* object) const return heap_->GetMarkBitmap()->Test(object); } -void MarkSweep::UnBindBitmaps() { - TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_); - for (const auto& space : GetHeap()->GetContinuousSpaces()) { - if (space->IsMallocSpace()) { - space::MallocSpace* alloc_space = space->AsMallocSpace(); - if (alloc_space->temp_bitmap_.get() != NULL) { - // At this point, the temp_bitmap holds our old mark bitmap. 
- accounting::SpaceBitmap* new_bitmap = alloc_space->temp_bitmap_.release(); - GetHeap()->GetMarkBitmap()->ReplaceBitmap(alloc_space->mark_bitmap_.get(), new_bitmap); - CHECK_EQ(alloc_space->mark_bitmap_.release(), alloc_space->live_bitmap_.get()); - alloc_space->mark_bitmap_.reset(new_bitmap); - DCHECK(alloc_space->temp_bitmap_.get() == NULL); - } - } - } -} - void MarkSweep::FinishPhase() { TimingLogger::ScopedSplit split("FinishPhase", &timings_); // Can't enqueue references if we hold the mutator lock. diff --git a/runtime/gc/collector/mark_sweep.h b/runtime/gc/collector/mark_sweep.h index e2eafb5f52..0c27a3be59 100644 --- a/runtime/gc/collector/mark_sweep.h +++ b/runtime/gc/collector/mark_sweep.h @@ -118,12 +118,6 @@ class MarkSweep : public GarbageCollector { // the image. Mark that portion of the heap as immune. virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void BindLiveToMarkBitmap(space::ContinuousSpace* space) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - - void UnBindBitmaps() - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - // Builds a mark stack with objects on dirty cards and recursively mark until it empties. void RecursiveMarkDirtyObjects(bool paused, byte minimum_age) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) @@ -151,10 +145,6 @@ class MarkSweep : public GarbageCollector { void SweepArray(accounting::ObjectStack* allocation_stack_, bool swap_bitmaps) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - mirror::Object* GetClearedReferences() { - return cleared_reference_list_; - } - // Blackens an object. 
void ScanObject(mirror::Object* obj) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) @@ -346,12 +336,6 @@ class MarkSweep : public GarbageCollector { mirror::Object* immune_begin_; mirror::Object* immune_end_; - mirror::Object* soft_reference_list_; - mirror::Object* weak_reference_list_; - mirror::Object* finalizer_reference_list_; - mirror::Object* phantom_reference_list_; - mirror::Object* cleared_reference_list_; - // Parallel finger. AtomicInteger atomic_finger_; // Number of classes scanned, if kCountScannedTypes. diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc index 99c726d216..625f86913c 100644 --- a/runtime/gc/collector/semi_space.cc +++ b/runtime/gc/collector/semi_space.cc @@ -67,7 +67,8 @@ static constexpr bool kResetFromSpace = true; void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) { // Bind live to mark bitmap if necessary. if (space->GetLiveBitmap() != space->GetMarkBitmap()) { - BindLiveToMarkBitmap(space); + CHECK(space->IsContinuousMemMapAllocSpace()); + space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap(); } // Add the space to the immune region. if (immune_begin_ == nullptr) { @@ -98,12 +99,13 @@ void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) { void SemiSpace::BindBitmaps() { timings_.StartSplit("BindBitmaps"); - WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); + WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_); // Mark all of the spaces we never collect as immune. 
for (const auto& space : GetHeap()->GetContinuousSpaces()) { if (space->GetLiveBitmap() != nullptr) { if (space == to_space_) { - BindLiveToMarkBitmap(to_space_); + CHECK(to_space_->IsContinuousMemMapAllocSpace()); + to_space_->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap(); } else if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect // Add the main free list space and the non-moving @@ -119,7 +121,6 @@ void SemiSpace::BindBitmaps() { if (generational_ && !whole_heap_collection_) { // We won't collect the large object space if a bump pointer space only collection. is_large_object_space_immune_ = true; - GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked(); } timings_.EndSplit(); } @@ -180,8 +181,7 @@ void SemiSpace::MarkingPhase() { VLOG(heap) << "Bump pointer space only collection"; } } - Thread* self = Thread::Current(); - Locks::mutator_lock_->AssertExclusiveHeld(self); + Locks::mutator_lock_->AssertExclusiveHeld(self_); TimingLogger::ScopedSplit split("MarkingPhase", &timings_); // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the // wrong space. @@ -209,7 +209,7 @@ void SemiSpace::MarkingPhase() { // the live stack during the recursive mark. timings_.NewSplit("SwapStacks"); heap_->SwapStacks(); - WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); + WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_); MarkRoots(); // Mark roots of immune spaces. UpdateAndMarkModUnion(); @@ -287,6 +287,11 @@ void SemiSpace::MarkReachableObjects() { if (is_large_object_space_immune_) { DCHECK(generational_ && !whole_heap_collection_); + // Delay copying the live set to the marked set until here from + // BindBitmaps() as the large objects on the allocation stack may + // be newly added to the live set above in MarkAllocStackAsLive(). 
+ GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked(); + // When the large object space is immune, we need to scan the // large object space as roots as they contain references to their // classes (primitive array classes) that could move though they @@ -305,10 +310,9 @@ void SemiSpace::MarkReachableObjects() { void SemiSpace::ReclaimPhase() { TimingLogger::ScopedSplit split("ReclaimPhase", &timings_); - Thread* self = Thread::Current(); - ProcessReferences(self); + ProcessReferences(self_); { - ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_); + ReaderMutexLock mu(self_, *Locks::heap_bitmap_lock_); SweepSystemWeaks(); } // Record freed memory. @@ -329,7 +333,7 @@ void SemiSpace::ReclaimPhase() { timings_.EndSplit(); { - WriterMutexLock mu(self, *Locks::heap_bitmap_lock_); + WriterMutexLock mu(self_, *Locks::heap_bitmap_lock_); // Reclaim unmarked objects. Sweep(false); // Swap the live and mark bitmaps for each space which we modified space. This is an @@ -339,7 +343,8 @@ void SemiSpace::ReclaimPhase() { SwapBitmaps(); timings_.EndSplit(); // Unbind the live and mark bitmaps. - UnBindBitmaps(); + TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_); + GetHeap()->UnBindBitmaps(); } // Release the memory used by the from space. 
if (kResetFromSpace) { @@ -530,14 +535,6 @@ void SemiSpace::MarkRoots() { timings_.EndSplit(); } -void SemiSpace::BindLiveToMarkBitmap(space::ContinuousSpace* space) { - CHECK(space->IsMallocSpace()); - space::MallocSpace* alloc_space = space->AsMallocSpace(); - accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap(); - accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap(); - GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap); -} - mirror::Object* SemiSpace::MarkedForwardingAddressCallback(Object* object, void* arg) { return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object); } @@ -548,7 +545,7 @@ void SemiSpace::SweepSystemWeaks() { timings_.EndSplit(); } -bool SemiSpace::ShouldSweepSpace(space::MallocSpace* space) const { +bool SemiSpace::ShouldSweepSpace(space::ContinuousSpace* space) const { return space != from_space_ && space != to_space_ && !IsImmuneSpace(space); } @@ -556,16 +553,16 @@ void SemiSpace::Sweep(bool swap_bitmaps) { DCHECK(mark_stack_->IsEmpty()); TimingLogger::ScopedSplit("Sweep", &timings_); for (const auto& space : GetHeap()->GetContinuousSpaces()) { - if (space->IsMallocSpace()) { - space::MallocSpace* malloc_space = space->AsMallocSpace(); - if (!ShouldSweepSpace(malloc_space)) { + if (space->IsContinuousMemMapAllocSpace()) { + space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace(); + if (!ShouldSweepSpace(alloc_space)) { continue; } TimingLogger::ScopedSplit split( - malloc_space->IsZygoteSpace() ? "SweepZygoteSpace" : "SweepAllocSpace", &timings_); + alloc_space->IsZygoteSpace() ? 
"SweepZygoteSpace" : "SweepAllocSpace", &timings_); size_t freed_objects = 0; size_t freed_bytes = 0; - malloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes); + alloc_space->Sweep(swap_bitmaps, &freed_objects, &freed_bytes); heap_->RecordFree(freed_objects, freed_bytes); freed_objects_.FetchAndAdd(freed_objects); freed_bytes_.FetchAndAdd(freed_bytes); @@ -660,20 +657,6 @@ inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr; } -void SemiSpace::UnBindBitmaps() { - TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_); - for (const auto& space : GetHeap()->GetContinuousSpaces()) { - if (space->IsMallocSpace()) { - space::MallocSpace* alloc_space = space->AsMallocSpace(); - if (alloc_space->HasBoundBitmaps()) { - alloc_space->UnBindBitmaps(); - heap_->GetMarkBitmap()->ReplaceBitmap(alloc_space->GetLiveBitmap(), - alloc_space->GetMarkBitmap()); - } - } - } -} - void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) { DCHECK(to_space != nullptr); to_space_ = to_space; @@ -686,7 +669,6 @@ void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) { void SemiSpace::FinishPhase() { TimingLogger::ScopedSplit split("FinishPhase", &timings_); - // Can't enqueue references if we hold the mutator lock. Heap* heap = GetHeap(); timings_.NewSplit("PostGcVerification"); heap->PostGcVerification(this); diff --git a/runtime/gc/collector/semi_space.h b/runtime/gc/collector/semi_space.h index bf129a301f..f81a7c2c88 100644 --- a/runtime/gc/collector/semi_space.h +++ b/runtime/gc/collector/semi_space.h @@ -114,9 +114,6 @@ class SemiSpace : public GarbageCollector { // the image. Mark that portion of the heap as immune. 
virtual void BindBitmaps() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - void BindLiveToMarkBitmap(space::ContinuousSpace* space) - EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); - void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); @@ -171,7 +168,7 @@ class SemiSpace : public GarbageCollector { void ResizeMarkStack(size_t new_size); // Returns true if we should sweep the space. - virtual bool ShouldSweepSpace(space::MallocSpace* space) const; + virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const; // Returns how many threads we should use for the current GC phase based on if we are paused, // whether or not we care about pauses. diff --git a/runtime/gc/collector/sticky_mark_sweep.cc b/runtime/gc/collector/sticky_mark_sweep.cc index c562e8c4a7..30f3753ab4 100644 --- a/runtime/gc/collector/sticky_mark_sweep.cc +++ b/runtime/gc/collector/sticky_mark_sweep.cc @@ -40,10 +40,10 @@ void StickyMarkSweep::BindBitmaps() { for (const auto& space : GetHeap()->GetContinuousSpaces()) { if (space->IsMallocSpace() && space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect) { - BindLiveToMarkBitmap(space); + DCHECK(space->IsContinuousMemMapAllocSpace()); + space->AsContinuousMemMapAllocSpace()->BindLiveToMarkBitmap(); } } - GetHeap()->GetLargeObjectsSpace()->CopyLiveToMarked(); } diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 14edc2fa79..fd98e2917f 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -44,6 +44,7 @@ #include "gc/space/large_object_space.h" #include "gc/space/rosalloc_space-inl.h" #include "gc/space/space-inl.h" +#include "gc/space/zygote_space.h" #include "heap-inl.h" #include "image.h" #include "invoke_arg_array_builder.h" @@ -147,7 +148,7 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max total_wait_time_(0), total_allocation_time_(0), verify_object_mode_(kHeapVerificationNotPermitted), - gc_disable_count_(0), + disable_moving_gc_count_(0), 
running_on_valgrind_(RUNNING_ON_VALGRIND), use_tlab_(use_tlab) { if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) { @@ -328,19 +329,19 @@ bool Heap::HasImageSpace() const { return false; } -void Heap::IncrementDisableGC(Thread* self) { +void Heap::IncrementDisableMovingGC(Thread* self) { // Need to do this holding the lock to prevent races where the GC is about to run / running when // we attempt to disable it. ScopedThreadStateChange tsc(self, kWaitingForGcToComplete); MutexLock mu(self, *gc_complete_lock_); - WaitForGcToCompleteLocked(self); - ++gc_disable_count_; + ++disable_moving_gc_count_; + // TODO: Wait for compacting GC to complete if we ever have a concurrent compacting GC. } -void Heap::DecrementDisableGC(Thread* self) { +void Heap::DecrementDisableMovingGC(Thread* self) { MutexLock mu(self, *gc_complete_lock_); - CHECK_GE(gc_disable_count_, 0U); - --gc_disable_count_; + CHECK_GE(disable_moving_gc_count_, 0U); + --disable_moving_gc_count_; } void Heap::UpdateProcessState(ProcessState process_state) { @@ -485,7 +486,6 @@ void Heap::RemoveSpace(space::Space* space) { DCHECK(it != alloc_spaces_.end()); alloc_spaces_.erase(it); } - delete space; } void Heap::RegisterGCAllocation(size_t bytes) { @@ -605,7 +605,7 @@ struct SoftReferenceArgs { }; mirror::Object* Heap::PreserveSoftReferenceCallback(mirror::Object* obj, void* arg) { - SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg); + SoftReferenceArgs* args = reinterpret_cast<SoftReferenceArgs*>(arg); // TODO: Not preserve all soft references. 
return args->recursive_mark_callback_(obj, args->arg_); } @@ -749,7 +749,7 @@ void Heap::Trim() { uint64_t total_alloc_space_size = 0; uint64_t managed_reclaimed = 0; for (const auto& space : continuous_spaces_) { - if (space->IsMallocSpace() && !space->IsZygoteSpace()) { + if (space->IsMallocSpace()) { gc::space::MallocSpace* alloc_space = space->AsMallocSpace(); total_alloc_space_size += alloc_space->Size(); managed_reclaimed += alloc_space->Trim(); @@ -1180,14 +1180,17 @@ void Heap::TransitionCollector(CollectorType collector_type) { Thread* self = Thread::Current(); ScopedThreadStateChange tsc(self, kWaitingPerformingGc); Locks::mutator_lock_->AssertNotHeld(self); - // Busy wait until we can GC (StartGC can fail if we have a non-zero gc_disable_count_, this - // rarely occurs however). - while (!StartGC(self)) { + // Busy wait until we can GC (StartGC can fail if we have a non-zero + // compacting_gc_disable_count_, this should rarely occurs). + bool copying_transition = + IsCompactingGC(background_collector_type_) || IsCompactingGC(post_zygote_collector_type_); + while (!StartGC(self, copying_transition)) { usleep(100); } tl->SuspendAll(); switch (collector_type) { case kCollectorTypeSS: + // Fall-through. case kCollectorTypeGSS: { mprotect(temp_space_->Begin(), temp_space_->Capacity(), PROT_READ | PROT_WRITE); CHECK(main_space_ != nullptr); @@ -1195,14 +1198,16 @@ void Heap::TransitionCollector(CollectorType collector_type) { DCHECK(allocator_mem_map_.get() == nullptr); allocator_mem_map_.reset(main_space_->ReleaseMemMap()); madvise(main_space_->Begin(), main_space_->Size(), MADV_DONTNEED); - // RemoveSpace deletes the removed space. - RemoveSpace(main_space_); + // RemoveSpace does not delete the removed space. + space::Space* old_space = main_space_; + RemoveSpace(old_space); + delete old_space; break; } case kCollectorTypeMS: // Fall through. 
case kCollectorTypeCMS: { - if (collector_type_ == kCollectorTypeSS || collector_type_ == kCollectorTypeGSS) { + if (IsCompactingGC(collector_type_)) { // TODO: Use mem-map from temp space? MemMap* mem_map = allocator_mem_map_.release(); CHECK(mem_map != nullptr); @@ -1257,6 +1262,7 @@ void Heap::ChangeCollector(CollectorType collector_type) { gc_plan_.clear(); switch (collector_type_) { case kCollectorTypeSS: + // Fall-through. case kCollectorTypeGSS: { concurrent_gc_ = false; gc_plan_.push_back(collector::kGcTypeFull); @@ -1345,7 +1351,7 @@ class ZygoteCompactingCollector : public collector::SemiSpace { } } - virtual bool ShouldSweepSpace(space::MallocSpace* space) const { + virtual bool ShouldSweepSpace(space::ContinuousSpace* space) const { // Don't sweep any spaces since we probably blasted the internal accounting of the free list // allocator. return false; @@ -1385,6 +1391,17 @@ class ZygoteCompactingCollector : public collector::SemiSpace { } }; +void Heap::UnBindBitmaps() { + for (const auto& space : GetContinuousSpaces()) { + if (space->IsContinuousMemMapAllocSpace()) { + space::ContinuousMemMapAllocSpace* alloc_space = space->AsContinuousMemMapAllocSpace(); + if (alloc_space->HasBoundBitmaps()) { + alloc_space->UnBindBitmaps(); + } + } + } +} + void Heap::PreZygoteFork() { static Mutex zygote_creation_lock_("zygote creation lock", kZygoteCreationLock); Thread* self = Thread::Current(); @@ -1420,30 +1437,28 @@ void Heap::PreZygoteFork() { non_moving_space_->SetLimit(target_space.Limit()); VLOG(heap) << "Zygote size " << non_moving_space_->Size() << " bytes"; } + // Save the old space so that we can remove it after we complete creating the zygote space. + space::MallocSpace* old_alloc_space = non_moving_space_; // Turn the current alloc space into a zygote space and obtain the new alloc space composed of - // the remaining available heap memory. 
- space::MallocSpace* zygote_space = non_moving_space_; - main_space_ = non_moving_space_->CreateZygoteSpace("alloc space", low_memory_mode_); + // the remaining available space. + // Remove the old space before creating the zygote space since creating the zygote space sets + // the old alloc space's bitmaps to nullptr. + RemoveSpace(old_alloc_space); + space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace("alloc space", + low_memory_mode_, + &main_space_); + delete old_alloc_space; + CHECK(zygote_space != nullptr) << "Failed creating zygote space"; + AddSpace(zygote_space, false); + CHECK(main_space_ != nullptr); if (main_space_->IsRosAllocSpace()) { rosalloc_space_ = main_space_->AsRosAllocSpace(); } else if (main_space_->IsDlMallocSpace()) { dlmalloc_space_ = main_space_->AsDlMallocSpace(); } main_space_->SetFootprintLimit(main_space_->Capacity()); - // Change the GC retention policy of the zygote space to only collect when full. - zygote_space->SetGcRetentionPolicy(space::kGcRetentionPolicyFullCollect); AddSpace(main_space_); have_zygote_space_ = true; - // Remove the zygote space from alloc_spaces_ array since not doing so causes crashes in - // GetObjectsAllocated. This happens because the bin packing blows away the internal accounting - // stored in between objects. - if (zygote_space->IsAllocSpace()) { - // TODO: Refactor zygote spaces to be a new space type to avoid more of these types of issues. - auto it = std::find(alloc_spaces_.begin(), alloc_spaces_.end(), zygote_space->AsAllocSpace()); - CHECK(it != alloc_spaces_.end()); - alloc_spaces_.erase(it); - zygote_space->InvalidateAllocator(); - } // Create the zygote space mod union table. 
accounting::ModUnionTable* mod_union_table = new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space); @@ -1529,7 +1544,8 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus LOG(WARNING) << "Performing GC on a thread that is handling a stack overflow."; } gc_complete_lock_->AssertNotHeld(self); - if (!StartGC(self)) { + const bool compacting_gc = IsCompactingGC(collector_type_); + if (!StartGC(self, compacting_gc)) { return collector::kGcTypeNone; } if (gc_cause == kGcCauseForAlloc && runtime->HasStatsEnabled()) { @@ -1551,7 +1567,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus collector::GarbageCollector* collector = nullptr; // TODO: Clean this up. - if (collector_type_ == kCollectorTypeSS || collector_type_ == kCollectorTypeGSS) { + if (compacting_gc) { DCHECK(current_allocator_ == kAllocatorTypeBumpPointer || current_allocator_ == kAllocatorTypeTLAB); gc_type = semi_space_collector_->GetGcType(); @@ -1631,15 +1647,15 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus return gc_type; } -bool Heap::StartGC(Thread* self) { +bool Heap::StartGC(Thread* self, bool is_compacting) { MutexLock mu(self, *gc_complete_lock_); // Ensure there is only one GC at a time. WaitForGcToCompleteLocked(self); // TODO: if another thread beat this one to do the GC, perhaps we should just return here? // Not doing at the moment to ensure soft references are cleared. // GC can be disabled if someone has a used GetPrimitiveArrayCritical. 
- if (gc_disable_count_ != 0) { - LOG(WARNING) << "Skipping GC due to disable count " << gc_disable_count_; + if (is_compacting && disable_moving_gc_count_ != 0) { + LOG(WARNING) << "Skipping GC due to disable moving GC count " << disable_moving_gc_count_; return false; } is_gc_running_ = true; @@ -2125,7 +2141,8 @@ bool Heap::IsMovableObject(const mirror::Object* obj) const { // Objects in the main space are only copied during background -> foreground transitions or // visa versa. if (main_space_ != nullptr && main_space_->HasAddress(obj) && - (IsCompactingGC(background_collector_type_) || IsCompactingGC(collector_type_))) { + (IsCompactingGC(background_collector_type_) || + IsCompactingGC(post_zygote_collector_type_))) { return true; } } diff --git a/runtime/gc/heap.h b/runtime/gc/heap.h index 0232b4daf9..fd7a6145c3 100644 --- a/runtime/gc/heap.h +++ b/runtime/gc/heap.h @@ -243,9 +243,9 @@ class Heap { // compaction related errors. bool IsInTempSpace(const mirror::Object* obj) const; - // Enables us to prevent GC until objects are released. - void IncrementDisableGC(Thread* self); - void DecrementDisableGC(Thread* self); + // Enables us to compacting GC until objects are released. + void IncrementDisableMovingGC(Thread* self); + void DecrementDisableMovingGC(Thread* self); // Initiates an explicit garbage collection. void CollectGarbage(bool clear_soft_references) LOCKS_EXCLUDED(Locks::mutator_lock_); @@ -467,6 +467,9 @@ class Heap { void MarkAllocStackAsLive(accounting::ObjectStack* stack) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + // Unbind any bound bitmaps. + void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + // DEPRECATED: Should remove in "near" future when support for multiple image spaces is added. // Assumes there is only one image space. 
space::ImageSpace* GetImageSpace() const; @@ -534,7 +537,7 @@ class Heap { void Compact(space::ContinuousMemMapAllocSpace* target_space, space::ContinuousMemMapAllocSpace* source_space); - bool StartGC(Thread* self) LOCKS_EXCLUDED(gc_complete_lock_); + bool StartGC(Thread* self, bool is_compacting) LOCKS_EXCLUDED(gc_complete_lock_); void FinishGC(Thread* self, collector::GcType gc_type) LOCKS_EXCLUDED(gc_complete_lock_); static ALWAYS_INLINE bool AllocatorHasAllocationStack(AllocatorType allocator_type) { @@ -880,8 +883,8 @@ class Heap { // The current state of heap verification, may be enabled or disabled. HeapVerificationMode verify_object_mode_; - // GC disable count, error on GC if > 0. - size_t gc_disable_count_ GUARDED_BY(gc_complete_lock_); + // Compacting GC disable count, prevents compacting GC from running iff > 0. + size_t disable_moving_gc_count_ GUARDED_BY(gc_complete_lock_); std::vector<collector::GarbageCollector*> garbage_collectors_; collector::SemiSpace* semi_space_collector_; diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc index 981af53490..9ae6a33f72 100644 --- a/runtime/gc/space/dlmalloc_space.cc +++ b/runtime/gc/space/dlmalloc_space.cc @@ -287,6 +287,7 @@ uint64_t DlMallocSpace::GetObjectsAllocated() { } void DlMallocSpace::Clear() { + // TODO: Delete and create new mspace here. 
madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED); GetLiveBitmap()->Clear(); GetMarkBitmap()->Clear(); diff --git a/runtime/gc/space/dlmalloc_space.h b/runtime/gc/space/dlmalloc_space.h index 671d2b2690..24308f7857 100644 --- a/runtime/gc/space/dlmalloc_space.h +++ b/runtime/gc/space/dlmalloc_space.h @@ -97,10 +97,6 @@ class DlMallocSpace : public MallocSpace { virtual void Clear(); - virtual void InvalidateAllocator() { - mspace_for_alloc_ = nullptr; - } - virtual bool IsDlMallocSpace() const { return true; } diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h index 78a83c92cd..c3f0ae6574 100644 --- a/runtime/gc/space/image_space.h +++ b/runtime/gc/space/image_space.h @@ -29,10 +29,6 @@ namespace space { // An image space is a space backed with a memory mapped image. class ImageSpace : public MemMapSpace { public: - bool CanAllocateInto() const { - return false; - } - SpaceType GetType() const { return kSpaceTypeImageSpace; } @@ -75,6 +71,10 @@ class ImageSpace : public MemMapSpace { void Dump(std::ostream& os) const; + // Sweeping image spaces is a NOP. + void Sweep(bool /* swap_bitmaps */, size_t* /* freed_objects */, size_t* /* freed_bytes */) { + } + private: // Tries to initialize an ImageSpace from the given image path, // returning NULL on error. 
diff --git a/runtime/gc/space/malloc_space.cc b/runtime/gc/space/malloc_space.cc index 2b2b26e197..9ca4eac523 100644 --- a/runtime/gc/space/malloc_space.cc +++ b/runtime/gc/space/malloc_space.cc @@ -19,6 +19,8 @@ #include "gc/accounting/card_table-inl.h" #include "gc/accounting/space_bitmap-inl.h" #include "gc/heap.h" +#include "gc/space/space-inl.h" +#include "gc/space/zygote_space.h" #include "mirror/class-inl.h" #include "mirror/object-inl.h" #include "runtime.h" @@ -33,22 +35,27 @@ namespace space { size_t MallocSpace::bitmap_index_ = 0; MallocSpace::MallocSpace(const std::string& name, MemMap* mem_map, - byte* begin, byte* end, byte* limit, size_t growth_limit) + byte* begin, byte* end, byte* limit, size_t growth_limit, + bool create_bitmaps) : ContinuousMemMapAllocSpace(name, mem_map, begin, end, limit, kGcRetentionPolicyAlwaysCollect), recent_free_pos_(0), lock_("allocation space lock", kAllocSpaceLock), growth_limit_(growth_limit) { - size_t bitmap_index = bitmap_index_++; - static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize); - CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->Begin()))); - CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->End()))); - live_bitmap_.reset(accounting::SpaceBitmap::Create( - StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)), - Begin(), Capacity())); - DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace live bitmap #" << bitmap_index; - mark_bitmap_.reset(accounting::SpaceBitmap::Create( - StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)), - Begin(), Capacity())); - DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #" << bitmap_index; + if (create_bitmaps) { + size_t bitmap_index = bitmap_index_++; + static const uintptr_t kGcCardSize = static_cast<uintptr_t>(accounting::CardTable::kCardSize); + 
CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->Begin()))); + CHECK(IsAligned<kGcCardSize>(reinterpret_cast<uintptr_t>(mem_map->End()))); + live_bitmap_.reset(accounting::SpaceBitmap::Create( + StringPrintf("allocspace %s live-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)), + Begin(), Capacity())); + DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace live bitmap #" + << bitmap_index; + mark_bitmap_.reset(accounting::SpaceBitmap::Create( + StringPrintf("allocspace %s mark-bitmap %d", name.c_str(), static_cast<int>(bitmap_index)), + Begin(), Capacity())); + DCHECK(live_bitmap_.get() != NULL) << "could not create allocspace mark bitmap #" + << bitmap_index; + } for (auto& freed : recent_freed_objects_) { freed.first = nullptr; freed.second = nullptr; @@ -154,29 +161,8 @@ void* MallocSpace::MoreCore(intptr_t increment) { return original_end; } -// Returns the old mark bitmap. -accounting::SpaceBitmap* MallocSpace::BindLiveToMarkBitmap() { - accounting::SpaceBitmap* live_bitmap = GetLiveBitmap(); - accounting::SpaceBitmap* mark_bitmap = mark_bitmap_.release(); - temp_bitmap_.reset(mark_bitmap); - mark_bitmap_.reset(live_bitmap); - return mark_bitmap; -} - -bool MallocSpace::HasBoundBitmaps() const { - return temp_bitmap_.get() != nullptr; -} - -void MallocSpace::UnBindBitmaps() { - CHECK(HasBoundBitmaps()); - // At this point, the temp_bitmap holds our old mark bitmap. 
- accounting::SpaceBitmap* new_bitmap = temp_bitmap_.release(); - CHECK_EQ(mark_bitmap_.release(), live_bitmap_.get()); - mark_bitmap_.reset(new_bitmap); - DCHECK(temp_bitmap_.get() == NULL); -} - -MallocSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool low_memory_mode) { +ZygoteSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool low_memory_mode, + MallocSpace** out_malloc_space) { // For RosAlloc, revoke thread local runs before creating a new // alloc space so that we won't mix thread local runs from different // alloc spaces. @@ -220,15 +206,23 @@ MallocSpace* MallocSpace::CreateZygoteSpace(const char* alloc_space_name, bool l if (capacity - initial_size > 0) { CHECK_MEMORY_CALL(mprotect, (end, capacity - initial_size, PROT_NONE), alloc_space_name); } - MallocSpace* alloc_space = CreateInstance(alloc_space_name, mem_map.release(), allocator, - end_, end, limit_, growth_limit); + *out_malloc_space = CreateInstance(alloc_space_name, mem_map.release(), allocator, end_, end, + limit_, growth_limit); SetLimit(End()); live_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End())); CHECK_EQ(live_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End())); mark_bitmap_->SetHeapLimit(reinterpret_cast<uintptr_t>(End())); CHECK_EQ(mark_bitmap_->HeapLimit(), reinterpret_cast<uintptr_t>(End())); - VLOG(heap) << "zygote space creation done"; - return alloc_space; + + // Create the actual zygote space. 
+ ZygoteSpace* zygote_space = ZygoteSpace::Create("Zygote space", ReleaseMemMap(), + live_bitmap_.release(), mark_bitmap_.release()); + if (UNLIKELY(zygote_space == nullptr)) { + VLOG(heap) << "Failed creating zygote space from space " << GetName(); + } else { + VLOG(heap) << "zygote space creation done"; + } + return zygote_space; } void MallocSpace::Dump(std::ostream& os) const { @@ -239,24 +233,16 @@ void MallocSpace::Dump(std::ostream& os) const { << ",name=\"" << GetName() << "\"]"; } -struct SweepCallbackContext { - bool swap_bitmaps; - Heap* heap; - space::MallocSpace* space; - Thread* self; - size_t freed_objects; - size_t freed_bytes; -}; - -static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) { +void MallocSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) { SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg); - space::AllocSpace* space = context->space; + DCHECK(context->space->IsMallocSpace()); + space::MallocSpace* space = context->space->AsMallocSpace(); Thread* self = context->self; Locks::heap_bitmap_lock_->AssertExclusiveHeld(self); // If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap // the bitmaps as an optimization. 
if (!context->swap_bitmaps) { - accounting::SpaceBitmap* bitmap = context->space->GetLiveBitmap(); + accounting::SpaceBitmap* bitmap = space->GetLiveBitmap(); for (size_t i = 0; i < num_ptrs; ++i) { bitmap->Clear(ptrs[i]); } @@ -268,54 +254,6 @@ static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) { context->freed_bytes += space->FreeList(self, num_ptrs, ptrs); } -static void ZygoteSweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) { - SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg); - Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self); - accounting::CardTable* card_table = context->heap->GetCardTable(); - // If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap - // the bitmaps as an optimization. - if (!context->swap_bitmaps) { - accounting::SpaceBitmap* bitmap = context->space->GetLiveBitmap(); - for (size_t i = 0; i < num_ptrs; ++i) { - bitmap->Clear(ptrs[i]); - } - } - // We don't free any actual memory to avoid dirtying the shared zygote pages. - for (size_t i = 0; i < num_ptrs; ++i) { - // Need to mark the card since this will update the mod-union table next GC cycle. - card_table->MarkCard(ptrs[i]); - } -} - -void MallocSpace::Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes) { - DCHECK(freed_objects != nullptr); - DCHECK(freed_bytes != nullptr); - accounting::SpaceBitmap* live_bitmap = GetLiveBitmap(); - accounting::SpaceBitmap* mark_bitmap = GetMarkBitmap(); - // If the bitmaps are bound then sweeping this space clearly won't do anything. 
- if (live_bitmap == mark_bitmap) { - return; - } - SweepCallbackContext scc; - scc.swap_bitmaps = swap_bitmaps; - scc.heap = Runtime::Current()->GetHeap(); - scc.self = Thread::Current(); - scc.space = this; - scc.freed_objects = 0; - scc.freed_bytes = 0; - if (swap_bitmaps) { - std::swap(live_bitmap, mark_bitmap); - } - // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked. - accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, - reinterpret_cast<uintptr_t>(Begin()), - reinterpret_cast<uintptr_t>(End()), - IsZygoteSpace() ? &ZygoteSweepCallback : &SweepCallback, - reinterpret_cast<void*>(&scc)); - *freed_objects += scc.freed_objects; - *freed_bytes += scc.freed_bytes; -} - } // namespace space } // namespace gc } // namespace art diff --git a/runtime/gc/space/malloc_space.h b/runtime/gc/space/malloc_space.h index 7681b6d459..58cfe8b6b9 100644 --- a/runtime/gc/space/malloc_space.h +++ b/runtime/gc/space/malloc_space.h @@ -31,6 +31,8 @@ namespace collector { namespace space { +class ZygoteSpace; + // TODO: Remove define macro #define CHECK_MEMORY_CALL(call, args, what) \ do { \ @@ -41,19 +43,13 @@ namespace space { } \ } while (false) -// const bool kUseRosAlloc = true; - // A common parent of DlMallocSpace and RosAllocSpace. class MallocSpace : public ContinuousMemMapAllocSpace { public: typedef void(*WalkCallback)(void *start, void *end, size_t num_bytes, void* callback_arg); SpaceType GetType() const { - if (GetGcRetentionPolicy() == kGcRetentionPolicyFullCollect) { - return kSpaceTypeZygoteSpace; - } else { - return kSpaceTypeAllocSpace; - } + return kSpaceTypeMallocSpace; } // Allocate num_bytes without allowing the underlying space to grow. 
@@ -109,14 +105,6 @@ class MallocSpace : public ContinuousMemMapAllocSpace { return GetMemMap()->Size(); } - accounting::SpaceBitmap* GetLiveBitmap() const { - return live_bitmap_.get(); - } - - accounting::SpaceBitmap* GetMarkBitmap() const { - return mark_bitmap_.get(); - } - void Dump(std::ostream& os) const; void SetGrowthLimit(size_t growth_limit); @@ -127,33 +115,20 @@ class MallocSpace : public ContinuousMemMapAllocSpace { virtual MallocSpace* CreateInstance(const std::string& name, MemMap* mem_map, void* allocator, byte* begin, byte* end, byte* limit, size_t growth_limit) = 0; - // Turn ourself into a zygote space and return a new alloc space - // which has our unused memory. When true, the low memory mode - // argument specifies that the heap wishes the created space to be - // more aggressive in releasing unused pages. - MallocSpace* CreateZygoteSpace(const char* alloc_space_name, bool low_memory_mode); - + // Splits ourself into a zygote space and new malloc space which has our unused memory. When true, + // the low memory mode argument specifies that the heap wishes the created space to be more + // aggressive in releasing unused pages. Invalidates the space its called on. + ZygoteSpace* CreateZygoteSpace(const char* alloc_space_name, bool low_memory_mode, + MallocSpace** out_malloc_space) NO_THREAD_SAFETY_ANALYSIS; virtual uint64_t GetBytesAllocated() = 0; virtual uint64_t GetObjectsAllocated() = 0; - // Returns the old mark bitmap. - accounting::SpaceBitmap* BindLiveToMarkBitmap(); - bool HasBoundBitmaps() const; - void UnBindBitmaps(); - // Returns the class of a recently freed object. mirror::Class* FindRecentFreedObject(const mirror::Object* obj); - // Used to ensure that failure happens when you free / allocate into an invalidated space. If we - // don't do this we may get heap corruption instead of a segfault at null. - virtual void InvalidateAllocator() = 0; - - // Sweep the references in the malloc space. 
- void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes); - protected: MallocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end, - byte* limit, size_t growth_limit); + byte* limit, size_t growth_limit, bool create_bitmaps = true); static MemMap* CreateMemMap(const std::string& name, size_t starting_size, size_t* initial_size, size_t* growth_limit, size_t* capacity, byte* requested_begin); @@ -166,9 +141,9 @@ class MallocSpace : public ContinuousMemMapAllocSpace { void RegisterRecentFree(mirror::Object* ptr) EXCLUSIVE_LOCKS_REQUIRED(lock_); - UniquePtr<accounting::SpaceBitmap> live_bitmap_; - UniquePtr<accounting::SpaceBitmap> mark_bitmap_; - UniquePtr<accounting::SpaceBitmap> temp_bitmap_; + virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() { + return &SweepCallback; + } // Recent allocation buffer. static constexpr size_t kRecentFreeCount = kDebugSpaces ? (1 << 16) : 0; @@ -190,9 +165,9 @@ class MallocSpace : public ContinuousMemMapAllocSpace { // one time by a call to ClearGrowthLimit. size_t growth_limit_; - friend class collector::MarkSweep; - private: + static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg); + DISALLOW_COPY_AND_ASSIGN(MallocSpace); }; diff --git a/runtime/gc/space/rosalloc_space.cc b/runtime/gc/space/rosalloc_space.cc index e5993f6a6f..177e38e6aa 100644 --- a/runtime/gc/space/rosalloc_space.cc +++ b/runtime/gc/space/rosalloc_space.cc @@ -312,6 +312,7 @@ void RosAllocSpace::RevokeAllThreadLocalBuffers() { } void RosAllocSpace::Clear() { + // TODO: Delete and create new mspace here. 
madvise(GetMemMap()->Begin(), GetMemMap()->Size(), MADV_DONTNEED); GetLiveBitmap()->Clear(); GetMarkBitmap()->Clear(); diff --git a/runtime/gc/space/rosalloc_space.h b/runtime/gc/space/rosalloc_space.h index 6720976e5a..555eb3cf09 100644 --- a/runtime/gc/space/rosalloc_space.h +++ b/runtime/gc/space/rosalloc_space.h @@ -95,10 +95,6 @@ class RosAllocSpace : public MallocSpace { // Returns the class of a recently freed object. mirror::Class* FindRecentFreedObject(const mirror::Object* obj); - virtual void InvalidateAllocator() { - rosalloc_for_alloc_ = NULL; - } - virtual bool IsRosAllocSpace() const { return true; } diff --git a/runtime/gc/space/space-inl.h b/runtime/gc/space/space-inl.h index 0c1d7a2769..e94c44e7f5 100644 --- a/runtime/gc/space/space-inl.h +++ b/runtime/gc/space/space-inl.h @@ -32,7 +32,7 @@ inline ImageSpace* Space::AsImageSpace() { } inline MallocSpace* Space::AsMallocSpace() { - DCHECK(GetType() == kSpaceTypeAllocSpace || GetType() == kSpaceTypeZygoteSpace); + DCHECK(GetType() == kSpaceTypeMallocSpace); DCHECK(IsDlMallocSpace() || IsRosAllocSpace()); return down_cast<MallocSpace*>(down_cast<MemMapSpace*>(this)); } diff --git a/runtime/gc/space/space.cc b/runtime/gc/space/space.cc index f8ba6b3e57..5478d5b1b4 100644 --- a/runtime/gc/space/space.cc +++ b/runtime/gc/space/space.cc @@ -17,6 +17,9 @@ #include "space.h" #include "base/logging.h" +#include "gc/accounting/heap_bitmap.h" +#include "runtime.h" +#include "thread-inl.h" namespace art { namespace gc { @@ -41,6 +44,59 @@ DiscontinuousSpace::DiscontinuousSpace(const std::string& name, mark_objects_(new accounting::ObjectSet("large marked objects")) { } +void ContinuousMemMapAllocSpace::Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes) { + DCHECK(freed_objects != nullptr); + DCHECK(freed_bytes != nullptr); + accounting::SpaceBitmap* live_bitmap = GetLiveBitmap(); + accounting::SpaceBitmap* mark_bitmap = GetMarkBitmap(); + // If the bitmaps are bound then sweeping this space 
clearly won't do anything. + if (live_bitmap == mark_bitmap) { + return; + } + SweepCallbackContext scc; + scc.swap_bitmaps = swap_bitmaps; + scc.heap = Runtime::Current()->GetHeap(); + scc.self = Thread::Current(); + scc.space = this; + scc.freed_objects = 0; + scc.freed_bytes = 0; + if (swap_bitmaps) { + std::swap(live_bitmap, mark_bitmap); + } + // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked. + accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, + reinterpret_cast<uintptr_t>(Begin()), + reinterpret_cast<uintptr_t>(End()), + GetSweepCallback(), + reinterpret_cast<void*>(&scc)); + *freed_objects += scc.freed_objects; + *freed_bytes += scc.freed_bytes; +} + +// Returns the old mark bitmap. +void ContinuousMemMapAllocSpace::BindLiveToMarkBitmap() { + CHECK(!HasBoundBitmaps()); + accounting::SpaceBitmap* live_bitmap = GetLiveBitmap(); + accounting::SpaceBitmap* mark_bitmap = mark_bitmap_.release(); + Runtime::Current()->GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap); + temp_bitmap_.reset(mark_bitmap); + mark_bitmap_.reset(live_bitmap); +} + +bool ContinuousMemMapAllocSpace::HasBoundBitmaps() const { + return temp_bitmap_.get() != nullptr; +} + +void ContinuousMemMapAllocSpace::UnBindBitmaps() { + CHECK(HasBoundBitmaps()); + // At this point, the temp_bitmap holds our old mark bitmap. 
+ accounting::SpaceBitmap* new_bitmap = temp_bitmap_.release(); + Runtime::Current()->GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap_.get(), new_bitmap); + CHECK_EQ(mark_bitmap_.release(), live_bitmap_.get()); + mark_bitmap_.reset(new_bitmap); + DCHECK(temp_bitmap_.get() == nullptr); +} + } // namespace space } // namespace gc } // namespace art diff --git a/runtime/gc/space/space.h b/runtime/gc/space/space.h index 529234412d..32230b3529 100644 --- a/runtime/gc/space/space.h +++ b/runtime/gc/space/space.h @@ -44,6 +44,7 @@ namespace space { class AllocSpace; class BumpPointerSpace; +class ContinuousMemMapAllocSpace; class ContinuousSpace; class DiscontinuousSpace; class MallocSpace; @@ -51,6 +52,7 @@ class DlMallocSpace; class RosAllocSpace; class ImageSpace; class LargeObjectSpace; +class ZygoteSpace; static constexpr bool kDebugSpaces = kIsDebugBuild; @@ -68,7 +70,7 @@ std::ostream& operator<<(std::ostream& os, const GcRetentionPolicy& policy); enum SpaceType { kSpaceTypeImageSpace, - kSpaceTypeAllocSpace, + kSpaceTypeMallocSpace, kSpaceTypeZygoteSpace, kSpaceTypeBumpPointerSpace, kSpaceTypeLargeObjectSpace, @@ -91,11 +93,6 @@ class Space { return gc_retention_policy_; } - // Does the space support allocation? - virtual bool CanAllocateInto() const { - return true; - } - // Is the given object contained within this space? virtual bool Contains(const mirror::Object* obj) const = 0; @@ -111,7 +108,7 @@ class Space { // Is this a dlmalloc backed allocation space? 
bool IsMallocSpace() const { SpaceType type = GetType(); - return type == kSpaceTypeAllocSpace || type == kSpaceTypeZygoteSpace; + return type == kSpaceTypeMallocSpace; } MallocSpace* AsMallocSpace(); @@ -120,20 +117,24 @@ class Space { } virtual DlMallocSpace* AsDlMallocSpace() { LOG(FATAL) << "Unreachable"; - return NULL; + return nullptr; } virtual bool IsRosAllocSpace() const { return false; } virtual RosAllocSpace* AsRosAllocSpace() { LOG(FATAL) << "Unreachable"; - return NULL; + return nullptr; } // Is this the space allocated into by the Zygote and no-longer in use? bool IsZygoteSpace() const { return GetType() == kSpaceTypeZygoteSpace; } + virtual ZygoteSpace* AsZygoteSpace() { + LOG(FATAL) << "Unreachable"; + return nullptr; + } // Is this space a bump pointer space? bool IsBumpPointerSpace() const { @@ -141,7 +142,7 @@ class Space { } virtual BumpPointerSpace* AsBumpPointerSpace() { LOG(FATAL) << "Unreachable"; - return NULL; + return nullptr; } // Does this space hold large objects and implement the large object space abstraction? @@ -168,6 +169,14 @@ class Space { return nullptr; } + virtual bool IsContinuousMemMapAllocSpace() const { + return false; + } + virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() { + LOG(FATAL) << "Unimplemented"; + return nullptr; + } + virtual ~Space() {} protected: @@ -181,6 +190,15 @@ class Space { std::string name_; protected: + struct SweepCallbackContext { + bool swap_bitmaps; + Heap* heap; + space::Space* space; + Thread* self; + size_t freed_objects; + size_t freed_bytes; + }; + // When should objects within this space be reclaimed? Not constant as we vary it in the case // of Zygote forking. 
GcRetentionPolicy gc_retention_policy_; @@ -378,22 +396,51 @@ class ContinuousMemMapAllocSpace : public MemMapSpace, public AllocSpace { virtual bool IsAllocSpace() const { return true; } - virtual AllocSpace* AsAllocSpace() { return this; } + virtual bool IsContinuousMemMapAllocSpace() const { + return true; + } + virtual ContinuousMemMapAllocSpace* AsContinuousMemMapAllocSpace() { + return this; + } + + bool HasBoundBitmaps() const EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void BindLiveToMarkBitmap() + EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + void UnBindBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_); + virtual void Clear() { LOG(FATAL) << "Unimplemented"; } + virtual accounting::SpaceBitmap* GetLiveBitmap() const { + return live_bitmap_.get(); + } + virtual accounting::SpaceBitmap* GetMarkBitmap() const { + return mark_bitmap_.get(); + } + + virtual void Sweep(bool swap_bitmaps, size_t* freed_objects, size_t* freed_bytes); + virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() { + LOG(FATAL) << "Unimplemented"; + return nullptr; + } + protected: + UniquePtr<accounting::SpaceBitmap> live_bitmap_; + UniquePtr<accounting::SpaceBitmap> mark_bitmap_; + UniquePtr<accounting::SpaceBitmap> temp_bitmap_; + ContinuousMemMapAllocSpace(const std::string& name, MemMap* mem_map, byte* begin, byte* end, byte* limit, GcRetentionPolicy gc_retention_policy) : MemMapSpace(name, mem_map, begin, end, limit, gc_retention_policy) { } private: + friend class gc::Heap; DISALLOW_COPY_AND_ASSIGN(ContinuousMemMapAllocSpace); }; diff --git a/runtime/gc/space/space_test.cc b/runtime/gc/space/space_test.cc index b1be9d8a84..427d5471d8 100644 --- a/runtime/gc/space/space_test.cc +++ b/runtime/gc/space/space_test.cc @@ -16,6 +16,7 @@ #include "dlmalloc_space.h" #include "large_object_space.h" +#include "zygote_space.h" #include "common_test.h" #include "globals.h" @@ -179,7 +180,16 @@ void SpaceTest::ZygoteSpaceTestBody(CreateSpaceFn 
create_space) { // Make sure that the zygote space isn't directly at the start of the space. space->Alloc(self, 1U * MB, &dummy); - space = space->CreateZygoteSpace("alloc space", Runtime::Current()->GetHeap()->IsLowMemoryMode()); + + gc::Heap* heap = Runtime::Current()->GetHeap(); + space::Space* old_space = space; + heap->RemoveSpace(old_space); + space::ZygoteSpace* zygote_space = space->CreateZygoteSpace("alloc space", + heap->IsLowMemoryMode(), + &space); + delete old_space; + // Add the zygote space. + AddSpace(zygote_space); // Make space findable to the heap, will also delete space when runtime is cleaned up AddSpace(space); diff --git a/runtime/gc/space/zygote_space.cc b/runtime/gc/space/zygote_space.cc new file mode 100644 index 0000000000..a303765c8e --- /dev/null +++ b/runtime/gc/space/zygote_space.cc @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "zygote_space.h" + +#include "gc/accounting/card_table-inl.h" +#include "gc/accounting/space_bitmap-inl.h" +#include "gc/heap.h" +#include "thread-inl.h" +#include "utils.h" + +namespace art { +namespace gc { +namespace space { + +class CountObjectsAllocated { + public: + explicit CountObjectsAllocated(size_t* objects_allocated) + : objects_allocated_(objects_allocated) {} + + void operator()(mirror::Object* obj) const { + ++*objects_allocated_; + } + + private: + size_t* const objects_allocated_; +}; + +ZygoteSpace* ZygoteSpace::Create(const std::string& name, MemMap* mem_map, + accounting::SpaceBitmap* live_bitmap, + accounting::SpaceBitmap* mark_bitmap) { + DCHECK(live_bitmap != nullptr); + DCHECK(mark_bitmap != nullptr); + size_t objects_allocated = 0; + CountObjectsAllocated visitor(&objects_allocated); + ReaderMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_); + live_bitmap->VisitMarkedRange(reinterpret_cast<uintptr_t>(mem_map->Begin()), + reinterpret_cast<uintptr_t>(mem_map->End()), visitor); + ZygoteSpace* zygote_space = new ZygoteSpace(name, mem_map, objects_allocated); + CHECK(zygote_space->live_bitmap_.get() == nullptr); + CHECK(zygote_space->mark_bitmap_.get() == nullptr); + zygote_space->live_bitmap_.reset(live_bitmap); + zygote_space->mark_bitmap_.reset(mark_bitmap); + return zygote_space; +} + +ZygoteSpace::ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated) + : ContinuousMemMapAllocSpace(name, mem_map, mem_map->Begin(), mem_map->End(), mem_map->End(), + kGcRetentionPolicyFullCollect), + objects_allocated_(objects_allocated) { +} + +void ZygoteSpace::Dump(std::ostream& os) const { + os << GetType() + << " begin=" << reinterpret_cast<void*>(Begin()) + << ",end=" << reinterpret_cast<void*>(End()) + << ",size=" << PrettySize(Size()) + << ",name=\"" << GetName() << "\"]"; +} + +void ZygoteSpace::SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg) { + SweepCallbackContext* context = 
static_cast<SweepCallbackContext*>(arg); + DCHECK(context->space->IsZygoteSpace()); + ZygoteSpace* zygote_space = context->space->AsZygoteSpace(); + Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self); + accounting::CardTable* card_table = context->heap->GetCardTable(); + // If the bitmaps aren't swapped we need to clear the bits since the GC isn't going to re-swap + // the bitmaps as an optimization. + if (!context->swap_bitmaps) { + accounting::SpaceBitmap* bitmap = zygote_space->GetLiveBitmap(); + for (size_t i = 0; i < num_ptrs; ++i) { + bitmap->Clear(ptrs[i]); + } + } + // We don't free any actual memory to avoid dirtying the shared zygote pages. + for (size_t i = 0; i < num_ptrs; ++i) { + // Need to mark the card since this will update the mod-union table next GC cycle. + card_table->MarkCard(ptrs[i]); + } + zygote_space->objects_allocated_.FetchAndSub(num_ptrs); +} + +} // namespace space +} // namespace gc +} // namespace art diff --git a/runtime/gc/space/zygote_space.h b/runtime/gc/space/zygote_space.h new file mode 100644 index 0000000000..10a5492b30 --- /dev/null +++ b/runtime/gc/space/zygote_space.h @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2014 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_GC_SPACE_ZYGOTE_SPACE_H_ +#define ART_RUNTIME_GC_SPACE_ZYGOTE_SPACE_H_ + +#include "malloc_space.h" +#include "mem_map.h" + +namespace art { +namespace gc { + +namespace accounting { +class SpaceBitmap; +} + +namespace space { + +// A zygote space is a space which you cannot allocate into or free from. +class ZygoteSpace : public ContinuousMemMapAllocSpace { + public: + // Returns the remaining storage in the out_map field. + static ZygoteSpace* Create(const std::string& name, MemMap* mem_map, + accounting::SpaceBitmap* live_bitmap, + accounting::SpaceBitmap* mark_bitmap) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + + void Dump(std::ostream& os) const; + virtual SpaceType GetType() const { + return kSpaceTypeZygoteSpace; + } + virtual ZygoteSpace* AsZygoteSpace() { + return this; + } + virtual mirror::Object* AllocWithGrowth(Thread* /*self*/, size_t /*num_bytes*/, + size_t* /*bytes_allocated*/) { + LOG(FATAL) << "Unimplemented"; + return nullptr; + } + virtual mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated) { + LOG(FATAL) << "Unimplemented"; + return nullptr; + } + virtual size_t AllocationSize(const mirror::Object* obj) { + LOG(FATAL) << "Unimplemented"; + return 0; + } + virtual size_t Free(Thread* self, mirror::Object* ptr) { + LOG(FATAL) << "Unimplemented"; + return 0; + } + virtual size_t FreeList(Thread* self, size_t num_ptrs, mirror::Object** ptrs) { + LOG(FATAL) << "Unimplemented"; + return 0; + } + virtual uint64_t GetBytesAllocated() { + return Size(); + } + virtual uint64_t GetObjectsAllocated() { + return objects_allocated_; + } + + protected: + virtual accounting::SpaceBitmap::SweepCallback* GetSweepCallback() { + return &SweepCallback; + } + + private: + ZygoteSpace(const std::string& name, MemMap* mem_map, size_t objects_allocated); + static void SweepCallback(size_t num_ptrs, mirror::Object** ptrs, void* arg); + + AtomicInteger objects_allocated_; + + friend class Space; + 
DISALLOW_COPY_AND_ASSIGN(ZygoteSpace); +}; + +} // namespace space +} // namespace gc +} // namespace art + +#endif // ART_RUNTIME_GC_SPACE_ZYGOTE_SPACE_H_ diff --git a/runtime/interpreter/interpreter_common.cc b/runtime/interpreter/interpreter_common.cc index be358e3822..0f94ccdfe2 100644 --- a/runtime/interpreter/interpreter_common.cc +++ b/runtime/interpreter/interpreter_common.cc @@ -200,26 +200,20 @@ bool DoFilledNewArray(const Instruction* inst, const ShadowFrame& shadow_frame, DCHECK(self->IsExceptionPending()); return false; } + uint32_t arg[5]; // only used in filled-new-array. + uint32_t vregC; // only used in filled-new-array-range. if (is_range) { - uint32_t vregC = inst->VRegC_3rc(); - const bool is_primitive_int_component = componentClass->IsPrimitiveInt(); - for (int32_t i = 0; i < length; ++i) { - if (is_primitive_int_component) { - newArray->AsIntArray()->Set(i, shadow_frame.GetVReg(vregC + i)); - } else { - newArray->AsObjectArray<Object>()->Set(i, shadow_frame.GetVRegReference(vregC + i)); - } - } + vregC = inst->VRegC_3rc(); } else { - uint32_t arg[5]; inst->GetArgs(arg); - const bool is_primitive_int_component = componentClass->IsPrimitiveInt(); - for (int32_t i = 0; i < length; ++i) { - if (is_primitive_int_component) { - newArray->AsIntArray()->Set(i, shadow_frame.GetVReg(arg[i])); - } else { - newArray->AsObjectArray<Object>()->Set(i, shadow_frame.GetVRegReference(arg[i])); - } + } + const bool is_primitive_int_component = componentClass->IsPrimitiveInt(); + for (int32_t i = 0; i < length; ++i) { + size_t src_reg = is_range ? 
vregC + i : arg[i]; + if (is_primitive_int_component) { + newArray->AsIntArray()->SetWithoutChecks(i, shadow_frame.GetVReg(src_reg)); + } else { + newArray->AsObjectArray<Object>()->SetWithoutChecks(i, shadow_frame.GetVRegReference(src_reg)); } } diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc index 942c275139..ca03885aab 100644 --- a/runtime/interpreter/interpreter_goto_table_impl.cc +++ b/runtime/interpreter/interpreter_goto_table_impl.cc @@ -932,8 +932,8 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* } else { int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); BooleanArray* array = a->AsBooleanArray(); - if (LIKELY(array->IsValidIndex(index))) { - shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]); + if (LIKELY(array->CheckIsValidIndex(index))) { + shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); ADVANCE(2); } else { HANDLE_PENDING_EXCEPTION(); @@ -950,8 +950,8 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* } else { int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); ByteArray* array = a->AsByteArray(); - if (LIKELY(array->IsValidIndex(index))) { - shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]); + if (LIKELY(array->CheckIsValidIndex(index))) { + shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); ADVANCE(2); } else { HANDLE_PENDING_EXCEPTION(); @@ -968,8 +968,8 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* } else { int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); CharArray* array = a->AsCharArray(); - if (LIKELY(array->IsValidIndex(index))) { - shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]); + if (LIKELY(array->CheckIsValidIndex(index))) { + shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); 
ADVANCE(2); } else { HANDLE_PENDING_EXCEPTION(); @@ -986,8 +986,8 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* } else { int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); ShortArray* array = a->AsShortArray(); - if (LIKELY(array->IsValidIndex(index))) { - shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]); + if (LIKELY(array->CheckIsValidIndex(index))) { + shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); ADVANCE(2); } else { HANDLE_PENDING_EXCEPTION(); @@ -1004,8 +1004,8 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* } else { int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); IntArray* array = a->AsIntArray(); - if (LIKELY(array->IsValidIndex(index))) { - shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]); + if (LIKELY(array->CheckIsValidIndex(index))) { + shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); ADVANCE(2); } else { HANDLE_PENDING_EXCEPTION(); @@ -1022,8 +1022,8 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* } else { int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); LongArray* array = a->AsLongArray(); - if (LIKELY(array->IsValidIndex(index))) { - shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetData()[index]); + if (LIKELY(array->CheckIsValidIndex(index))) { + shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); ADVANCE(2); } else { HANDLE_PENDING_EXCEPTION(); @@ -1040,7 +1040,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* } else { int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); ObjectArray<Object>* array = a->AsObjectArray<Object>(); - if (LIKELY(array->IsValidIndex(index))) { + if (LIKELY(array->CheckIsValidIndex(index))) { shadow_frame.SetVRegReference(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); ADVANCE(2); 
} else { @@ -1059,8 +1059,8 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); BooleanArray* array = a->AsBooleanArray(); - if (LIKELY(array->IsValidIndex(index))) { - array->GetData()[index] = val; + if (LIKELY(array->CheckIsValidIndex(index))) { + array->SetWithoutChecks(index, val); ADVANCE(2); } else { HANDLE_PENDING_EXCEPTION(); @@ -1078,8 +1078,8 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); ByteArray* array = a->AsByteArray(); - if (LIKELY(array->IsValidIndex(index))) { - array->GetData()[index] = val; + if (LIKELY(array->CheckIsValidIndex(index))) { + array->SetWithoutChecks(index, val); ADVANCE(2); } else { HANDLE_PENDING_EXCEPTION(); @@ -1097,8 +1097,8 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); CharArray* array = a->AsCharArray(); - if (LIKELY(array->IsValidIndex(index))) { - array->GetData()[index] = val; + if (LIKELY(array->CheckIsValidIndex(index))) { + array->SetWithoutChecks(index, val); ADVANCE(2); } else { HANDLE_PENDING_EXCEPTION(); @@ -1116,8 +1116,8 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); ShortArray* array = a->AsShortArray(); - if (LIKELY(array->IsValidIndex(index))) { - array->GetData()[index] = val; + if (LIKELY(array->CheckIsValidIndex(index))) { + array->SetWithoutChecks(index, val); ADVANCE(2); } else { HANDLE_PENDING_EXCEPTION(); @@ -1135,8 +1135,8 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const 
DexFile::CodeItem* int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); IntArray* array = a->AsIntArray(); - if (LIKELY(array->IsValidIndex(index))) { - array->GetData()[index] = val; + if (LIKELY(array->CheckIsValidIndex(index))) { + array->SetWithoutChecks(index, val); ADVANCE(2); } else { HANDLE_PENDING_EXCEPTION(); @@ -1154,8 +1154,8 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); LongArray* array = a->AsLongArray(); - if (LIKELY(array->IsValidIndex(index))) { - array->GetData()[index] = val; + if (LIKELY(array->CheckIsValidIndex(index))) { + array->SetWithoutChecks(index, val); ADVANCE(2); } else { HANDLE_PENDING_EXCEPTION(); @@ -1173,7 +1173,7 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem* int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); Object* val = shadow_frame.GetVRegReference(inst->VRegA_23x(inst_data)); ObjectArray<Object>* array = a->AsObjectArray<Object>(); - if (LIKELY(array->IsValidIndex(index) && array->CheckAssignable(val))) { + if (LIKELY(array->CheckIsValidIndex(index) && array->CheckAssignable(val))) { array->SetWithoutChecks(index, val); ADVANCE(2); } else { diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc index 75041eaa16..7631736a5b 100644 --- a/runtime/interpreter/interpreter_switch_impl.cc +++ b/runtime/interpreter/interpreter_switch_impl.cc @@ -830,8 +830,8 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem } int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); BooleanArray* array = a->AsBooleanArray(); - if (LIKELY(array->IsValidIndex(index))) { - shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]); + if (LIKELY(array->CheckIsValidIndex(index))) { + 
shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); inst = inst->Next_2xx(); } else { HANDLE_PENDING_EXCEPTION(); @@ -848,8 +848,8 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem } int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); ByteArray* array = a->AsByteArray(); - if (LIKELY(array->IsValidIndex(index))) { - shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]); + if (LIKELY(array->CheckIsValidIndex(index))) { + shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); inst = inst->Next_2xx(); } else { HANDLE_PENDING_EXCEPTION(); @@ -866,8 +866,8 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem } int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); CharArray* array = a->AsCharArray(); - if (LIKELY(array->IsValidIndex(index))) { - shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]); + if (LIKELY(array->CheckIsValidIndex(index))) { + shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); inst = inst->Next_2xx(); } else { HANDLE_PENDING_EXCEPTION(); @@ -884,8 +884,8 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem } int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); ShortArray* array = a->AsShortArray(); - if (LIKELY(array->IsValidIndex(index))) { - shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetData()[index]); + if (LIKELY(array->CheckIsValidIndex(index))) { + shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); inst = inst->Next_2xx(); } else { HANDLE_PENDING_EXCEPTION(); @@ -902,8 +902,8 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem } int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); IntArray* array = a->AsIntArray(); - if (LIKELY(array->IsValidIndex(index))) { - shadow_frame.SetVReg(inst->VRegA_23x(inst_data), 
array->GetData()[index]); + if (LIKELY(array->CheckIsValidIndex(index))) { + shadow_frame.SetVReg(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); inst = inst->Next_2xx(); } else { HANDLE_PENDING_EXCEPTION(); @@ -920,8 +920,8 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem } int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); LongArray* array = a->AsLongArray(); - if (LIKELY(array->IsValidIndex(index))) { - shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetData()[index]); + if (LIKELY(array->CheckIsValidIndex(index))) { + shadow_frame.SetVRegLong(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); inst = inst->Next_2xx(); } else { HANDLE_PENDING_EXCEPTION(); @@ -938,7 +938,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem } int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); ObjectArray<Object>* array = a->AsObjectArray<Object>(); - if (LIKELY(array->IsValidIndex(index))) { + if (LIKELY(array->CheckIsValidIndex(index))) { shadow_frame.SetVRegReference(inst->VRegA_23x(inst_data), array->GetWithoutChecks(index)); inst = inst->Next_2xx(); } else { @@ -957,8 +957,8 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem uint8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); BooleanArray* array = a->AsBooleanArray(); - if (LIKELY(array->IsValidIndex(index))) { - array->GetData()[index] = val; + if (LIKELY(array->CheckIsValidIndex(index))) { + array->SetWithoutChecks(index, val); inst = inst->Next_2xx(); } else { HANDLE_PENDING_EXCEPTION(); @@ -976,8 +976,8 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem int8_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); ByteArray* array = a->AsByteArray(); - if (LIKELY(array->IsValidIndex(index))) { - array->GetData()[index] = 
val; + if (LIKELY(array->CheckIsValidIndex(index))) { + array->SetWithoutChecks(index, val); inst = inst->Next_2xx(); } else { HANDLE_PENDING_EXCEPTION(); @@ -995,8 +995,8 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem uint16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); CharArray* array = a->AsCharArray(); - if (LIKELY(array->IsValidIndex(index))) { - array->GetData()[index] = val; + if (LIKELY(array->CheckIsValidIndex(index))) { + array->SetWithoutChecks(index, val); inst = inst->Next_2xx(); } else { HANDLE_PENDING_EXCEPTION(); @@ -1014,8 +1014,8 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem int16_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); ShortArray* array = a->AsShortArray(); - if (LIKELY(array->IsValidIndex(index))) { - array->GetData()[index] = val; + if (LIKELY(array->CheckIsValidIndex(index))) { + array->SetWithoutChecks(index, val); inst = inst->Next_2xx(); } else { HANDLE_PENDING_EXCEPTION(); @@ -1033,8 +1033,8 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem int32_t val = shadow_frame.GetVReg(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); IntArray* array = a->AsIntArray(); - if (LIKELY(array->IsValidIndex(index))) { - array->GetData()[index] = val; + if (LIKELY(array->CheckIsValidIndex(index))) { + array->SetWithoutChecks(index, val); inst = inst->Next_2xx(); } else { HANDLE_PENDING_EXCEPTION(); @@ -1052,8 +1052,8 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem int64_t val = shadow_frame.GetVRegLong(inst->VRegA_23x(inst_data)); int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); LongArray* array = a->AsLongArray(); - if (LIKELY(array->IsValidIndex(index))) { - array->GetData()[index] = val; + if 
(LIKELY(array->CheckIsValidIndex(index))) { + array->SetWithoutChecks(index, val); inst = inst->Next_2xx(); } else { HANDLE_PENDING_EXCEPTION(); @@ -1071,7 +1071,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem int32_t index = shadow_frame.GetVReg(inst->VRegC_23x()); Object* val = shadow_frame.GetVRegReference(inst->VRegA_23x(inst_data)); ObjectArray<Object>* array = a->AsObjectArray<Object>(); - if (LIKELY(array->IsValidIndex(index) && array->CheckAssignable(val))) { + if (LIKELY(array->CheckIsValidIndex(index) && array->CheckAssignable(val))) { array->SetWithoutChecks(index, val); inst = inst->Next_2xx(); } else { diff --git a/runtime/jdwp/jdwp_handler.cc b/runtime/jdwp/jdwp_handler.cc index 6522a62300..a514e6967b 100644 --- a/runtime/jdwp/jdwp_handler.cc +++ b/runtime/jdwp/jdwp_handler.cc @@ -21,6 +21,7 @@ #include <string> #include "atomic.h" +#include "base/hex_dump.h" #include "base/logging.h" #include "base/macros.h" #include "base/stringprintf.h" @@ -1705,7 +1706,7 @@ void JdwpState::ProcessRequest(Request& request, ExpandBuf* pReply) { } if (i == arraysize(gHandlers)) { LOG(ERROR) << "Command not implemented: " << DescribeCommand(request); - LOG(ERROR) << HexDump(request.data(), request.size()); + LOG(ERROR) << HexDump(request.data(), request.size(), false, ""); result = ERR_NOT_IMPLEMENTED; } @@ -1729,7 +1730,7 @@ void JdwpState::ProcessRequest(Request& request, ExpandBuf* pReply) { size_t respLen = expandBufGetLength(pReply) - kJDWPHeaderLen; VLOG(jdwp) << "REPLY: " << GetCommandName(request) << " " << result << " (length=" << respLen << ")"; if (false) { - VLOG(jdwp) << HexDump(expandBufGetBuffer(pReply) + kJDWPHeaderLen, respLen); + VLOG(jdwp) << HexDump(expandBufGetBuffer(pReply) + kJDWPHeaderLen, respLen, false, ""); } VLOG(jdwp) << "----------"; diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc index 5186399151..30b4ee83e0 100644 --- a/runtime/jni_internal.cc +++ b/runtime/jni_internal.cc @@ -523,6 
+523,12 @@ class SharedLibrary { return dlsym(handle_, symbol_name.c_str()); } + void VisitRoots(RootVisitor* visitor, void* arg) { + if (class_loader_ != nullptr) { + class_loader_ = visitor(class_loader_, arg); + } + } + private: enum JNI_OnLoadState { kPending, @@ -613,6 +619,12 @@ class Libraries { return NULL; } + void VisitRoots(RootVisitor* visitor, void* arg) { + for (auto& lib_pair : libraries_) { + lib_pair.second->VisitRoots(visitor, arg); + } + } + private: SafeMap<std::string, SharedLibrary*> libraries_; }; @@ -2195,7 +2207,7 @@ class JNI { Array* array = soa.Decode<Array*>(java_array); gc::Heap* heap = Runtime::Current()->GetHeap(); if (heap->IsMovableObject(array)) { - heap->IncrementDisableGC(soa.Self()); + heap->IncrementDisableMovingGC(soa.Self()); // Re-decode in case the object moved since IncrementDisableGC waits for GC to complete. array = soa.Decode<Array*>(java_array); } @@ -2646,7 +2658,8 @@ class JNI { if (is_copy) { delete[] reinterpret_cast<uint64_t*>(elements); } else if (heap->IsMovableObject(array)) { + // A non-copy to a movable object must mean that we had disabled the moving GC. + heap->DecrementDisableMovingGC(soa.Self()); } UnpinPrimitiveArray(soa, array); } @@ -3384,6 +3397,11 @@ void JavaVMExt::VisitRoots(RootVisitor* visitor, void* arg) { MutexLock mu(self, pins_lock); pin_table.VisitRoots(visitor, arg); } + { + MutexLock mu(self, libraries_lock); + // Libraries contains shared libraries which hold a pointer to a class loader. + libraries->VisitRoots(visitor, arg); + } // The weak_globals table is visited by the GC itself (because it mutates the table). 
} diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc index d3b823668e..0a3e1a15c8 100644 --- a/runtime/mem_map.cc +++ b/runtime/mem_map.cc @@ -19,6 +19,7 @@ #include <inttypes.h> #include <backtrace/BacktraceMap.h> +#include "UniquePtr.h" #include "base/stringprintf.h" #include "ScopedFd.h" #include "utils.h" @@ -55,12 +56,12 @@ static void CheckMapRequest(byte* addr, size_t byte_count) { uintptr_t base = reinterpret_cast<uintptr_t>(addr); uintptr_t limit = base + byte_count; - BacktraceMap map(getpid()); - if (!map.Build()) { + UniquePtr<BacktraceMap> map(BacktraceMap::Create(getpid())); + if (!map->Build()) { PLOG(WARNING) << "Failed to build process map"; return; } - for (BacktraceMap::const_iterator it = map.begin(); it != map.end(); ++it) { + for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) { CHECK(!(base >= it->start && base < it->end) // start of new within old && !(limit > it->start && limit < it->end) // end of new within old && !(base <= it->start && limit > it->end)) // start/end of new includes all of old @@ -69,7 +70,7 @@ static void CheckMapRequest(byte* addr, size_t byte_count) { base, limit, static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end), it->name.c_str()) - << std::make_pair(it, map.end()); + << std::make_pair(it, map->end()); } } diff --git a/runtime/method_reference.h b/runtime/method_reference.h index 1ff4ea0942..8e46d7e607 100644 --- a/runtime/method_reference.h +++ b/runtime/method_reference.h @@ -17,6 +17,8 @@ #ifndef ART_RUNTIME_METHOD_REFERENCE_H_ #define ART_RUNTIME_METHOD_REFERENCE_H_ +#include <stdint.h> + namespace art { class DexFile; diff --git a/runtime/mirror/array.h b/runtime/mirror/array.h index 52659464fc..207573fa00 100644 --- a/runtime/mirror/array.h +++ b/runtime/mirror/array.h @@ -19,6 +19,7 @@ #include "object.h" #include "gc/heap.h" +#include "thread.h" namespace art { namespace mirror { @@ -83,7 +84,9 @@ class MANAGED Array : public Object { return reinterpret_cast<const 
void*>(data); } - bool IsValidIndex(int32_t index) const + // Returns true if the index is valid. If not, throws an ArrayIndexOutOfBoundsException and + // returns false. + bool CheckIsValidIndex(int32_t index) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { if (UNLIKELY(static_cast<uint32_t>(index) >= static_cast<uint32_t>(GetLength()))) { ThrowArrayIndexOutOfBoundsException(index); @@ -93,12 +96,13 @@ class MANAGED Array : public Object { } protected: - void ThrowArrayIndexOutOfBoundsException(int32_t index) const - SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ThrowArrayStoreException(Object* object) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: + void ThrowArrayIndexOutOfBoundsException(int32_t index) const + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + // The number of array elements. int32_t length_; // Marker for the data (used by generated code) @@ -126,18 +130,31 @@ class MANAGED PrimitiveArray : public Array { } T Get(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (!IsValidIndex(i)) { + if (UNLIKELY(!CheckIsValidIndex(i))) { + DCHECK(Thread::Current()->IsExceptionPending()); return T(0); } + return GetWithoutChecks(i); + } + + T GetWithoutChecks(int32_t i) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(CheckIsValidIndex(i)); return GetData()[i]; } void Set(int32_t i, T value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - if (IsValidIndex(i)) { - GetData()[i] = value; + if (LIKELY(CheckIsValidIndex(i))) { + SetWithoutChecks(i, value); + } else { + DCHECK(Thread::Current()->IsExceptionPending()); } } + void SetWithoutChecks(int32_t i, T value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + DCHECK(CheckIsValidIndex(i)); + GetData()[i] = value; + } + static void SetArrayClass(Class* array_class) { CHECK(array_class_ == NULL); CHECK(array_class != NULL); diff --git a/runtime/mirror/object_array-inl.h b/runtime/mirror/object_array-inl.h index be49b42f0d..6a50dfe019 100644 --- 
a/runtime/mirror/object_array-inl.h +++ b/runtime/mirror/object_array-inl.h @@ -50,11 +50,11 @@ inline ObjectArray<T>* ObjectArray<T>::Alloc(Thread* self, Class* object_array_c template<class T> inline T* ObjectArray<T>::Get(int32_t i) const { - if (UNLIKELY(!IsValidIndex(i))) { + if (UNLIKELY(!CheckIsValidIndex(i))) { + DCHECK(Thread::Current()->IsExceptionPending()); return NULL; } - MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*)); - return GetFieldObject<T*>(data_offset, false); + return GetWithoutChecks(i); } template<class T> @@ -71,9 +71,8 @@ inline bool ObjectArray<T>::CheckAssignable(T* object) { template<class T> inline void ObjectArray<T>::Set(int32_t i, T* object) { - if (LIKELY(IsValidIndex(i) && CheckAssignable(object))) { - MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*)); - SetFieldObject(data_offset, object, false); + if (LIKELY(CheckIsValidIndex(i) && CheckAssignable(object))) { + SetWithoutChecks(i, object); } else { DCHECK(Thread::Current()->IsExceptionPending()); } @@ -81,21 +80,24 @@ inline void ObjectArray<T>::Set(int32_t i, T* object) { template<class T> inline void ObjectArray<T>::SetWithoutChecks(int32_t i, T* object) { - DCHECK(IsValidIndex(i)); + DCHECK(CheckIsValidIndex(i)); + DCHECK(CheckAssignable(object)); MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*)); SetFieldObject(data_offset, object, false); } template<class T> inline void ObjectArray<T>::SetPtrWithoutChecks(int32_t i, T* object) { - DCHECK(IsValidIndex(i)); + DCHECK(CheckIsValidIndex(i)); + // TODO enable this check. It fails when writing the image in ImageWriter::FixupObjectArray. 
+ // DCHECK(CheckAssignable(object)); MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*)); SetFieldPtr(data_offset, object, false); } template<class T> inline T* ObjectArray<T>::GetWithoutChecks(int32_t i) const { - DCHECK(IsValidIndex(i)); + DCHECK(CheckIsValidIndex(i)); MemberOffset data_offset(DataOffset(sizeof(Object*)).Int32Value() + i * sizeof(Object*)); return GetFieldObject<T*>(data_offset, false); } @@ -104,10 +106,10 @@ template<class T> inline void ObjectArray<T>::Copy(const ObjectArray<T>* src, int src_pos, ObjectArray<T>* dst, int dst_pos, size_t length) { - if (src->IsValidIndex(src_pos) && - src->IsValidIndex(src_pos+length-1) && - dst->IsValidIndex(dst_pos) && - dst->IsValidIndex(dst_pos+length-1)) { + if (src->CheckIsValidIndex(src_pos) && + src->CheckIsValidIndex(src_pos + length - 1) && + dst->CheckIsValidIndex(dst_pos) && + dst->CheckIsValidIndex(dst_pos + length - 1)) { MemberOffset src_offset(DataOffset(sizeof(Object*)).Int32Value() + src_pos * sizeof(Object*)); MemberOffset dst_offset(DataOffset(sizeof(Object*)).Int32Value() + dst_pos * sizeof(Object*)); Class* array_class = dst->GetClass(); @@ -139,6 +141,8 @@ inline void ObjectArray<T>::Copy(const ObjectArray<T>* src, int src_pos, } } heap->WriteBarrierArray(dst, dst_pos, length); + } else { + DCHECK(Thread::Current()->IsExceptionPending()); } } diff --git a/runtime/oat.cc b/runtime/oat.cc index caf18f1c80..81d45405a7 100644 --- a/runtime/oat.cc +++ b/runtime/oat.cc @@ -22,7 +22,7 @@ namespace art { const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' }; -const uint8_t OatHeader::kOatVersion[] = { '0', '1', '3', '\0' }; +const uint8_t OatHeader::kOatVersion[] = { '0', '1', '4', '\0' }; OatHeader::OatHeader() { memset(this, 0, sizeof(*this)); diff --git a/runtime/profiler.cc b/runtime/profiler.cc index 0e738124f7..365c9c34d7 100644 --- a/runtime/profiler.cc +++ b/runtime/profiler.cc @@ -36,6 +36,11 @@ #include "ScopedLocalRef.h" #include 
"thread.h" #include "thread_list.h" + +#ifdef HAVE_ANDROID_OS +#include "cutils/properties.h" +#endif + #if !defined(ART_USE_PORTABLE_COMPILER) #include "entrypoints/quick/quick_entrypoints.h" #endif @@ -259,6 +264,17 @@ void BackgroundMethodSamplingProfiler::Start(int period, int duration, } } + // Only on target... +#ifdef HAVE_ANDROID_OS + // Switch off profiler if the dalvik.vm.profiler property has value 0. + char buf[PROP_VALUE_MAX]; + property_get("dalvik.vm.profiler", buf, "0"); + if (strcmp(buf, "0") == 0) { + LOG(INFO) << "Profiler disabled. To enable setprop dalvik.vm.profiler 1"; + return; + } +#endif + LOG(INFO) << "Starting profile with period " << period << "s, duration " << duration << "s, interval " << interval_us << "us. Profile file " << profile_file_name; diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 25912247b3..d070207555 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -318,10 +318,19 @@ size_t ParseMemoryOption(const char* s, size_t div) { return 0; } -size_t ParseIntegerOrDie(const std::string& s) { - std::string::size_type colon = s.find(':'); +static const std::string StringAfterChar(const std::string& s, char c) { + std::string::size_type colon = s.find(c); if (colon == std::string::npos) { - LOG(FATAL) << "Missing integer: " << s; + LOG(FATAL) << "Missing char " << c << " in string " << s; + } + // Add one to remove the char we were trimming until. 
+ return s.substr(colon + 1); +} + +static size_t ParseIntegerOrDie(const std::string& s, char after_char) { + std::string::size_type colon = s.find(after_char); + if (colon == std::string::npos) { + LOG(FATAL) << "Missing char " << after_char << " in string " << s; } const char* begin = &s[colon + 1]; char* end; @@ -332,10 +341,10 @@ size_t ParseIntegerOrDie(const std::string& s) { return result; } -double ParseDoubleOrDie(const std::string& option, const char* prefix, - double min, double max, bool ignore_unrecognized, - double defval) { - std::istringstream iss(option.substr(strlen(prefix))); + +static double ParseDoubleOrDie(const std::string& option, char after_char, double min, double max, + bool ignore_unrecognized, double defval) { + std::istringstream iss(StringAfterChar(option, after_char)); double value; iss >> value; // Ensure that we have a value, there was no cruft after it and it satisfies a sensible range. @@ -473,7 +482,7 @@ Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, b parsed->boot_class_path_ = reinterpret_cast<const std::vector<const DexFile*>*>(options[i].second); } else if (StartsWith(option, "-Ximage:")) { - parsed->image_ = option.substr(strlen("-Ximage:")).data(); + parsed->image_ = StringAfterChar(option, ':'); } else if (StartsWith(option, "-Xcheck:jni")) { parsed->check_jni_ = true; } else if (StartsWith(option, "-Xrunjdwp:") || StartsWith(option, "-agentlib:jdwp=")) { @@ -539,15 +548,12 @@ Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, b } parsed->heap_max_free_ = size; } else if (StartsWith(option, "-XX:HeapTargetUtilization=")) { - parsed->heap_target_utilization_ = ParseDoubleOrDie(option, "-XX:HeapTargetUtilization=", - 0.1, 0.9, ignore_unrecognized, - parsed->heap_target_utilization_); + parsed->heap_target_utilization_ = ParseDoubleOrDie( + option, '=', 0.1, 0.9, ignore_unrecognized, parsed->heap_target_utilization_); } else if (StartsWith(option, 
"-XX:ParallelGCThreads=")) { - parsed->parallel_gc_threads_ = - ParseMemoryOption(option.substr(strlen("-XX:ParallelGCThreads=")).c_str(), 1024); + parsed->parallel_gc_threads_ = ParseIntegerOrDie(option, '='); } else if (StartsWith(option, "-XX:ConcGCThreads=")) { - parsed->conc_gc_threads_ = - ParseMemoryOption(option.substr(strlen("-XX:ConcGCThreads=")).c_str(), 1024); + parsed->conc_gc_threads_ = ParseIntegerOrDie(option, '='); } else if (StartsWith(option, "-Xss")) { size_t size = ParseMemoryOption(option.substr(strlen("-Xss")).c_str(), 1); if (size == 0) { @@ -560,15 +566,11 @@ Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, b } parsed->stack_size_ = size; } else if (StartsWith(option, "-XX:MaxSpinsBeforeThinLockInflation=")) { - parsed->max_spins_before_thin_lock_inflation_ = - strtoul(option.substr(strlen("-XX:MaxSpinsBeforeThinLockInflation=")).c_str(), - nullptr, 10); - } else if (option == "-XX:LongPauseLogThreshold") { - parsed->long_pause_log_threshold_ = - ParseMemoryOption(option.substr(strlen("-XX:LongPauseLogThreshold=")).c_str(), 1024); - } else if (option == "-XX:LongGCLogThreshold") { - parsed->long_gc_log_threshold_ = - ParseMemoryOption(option.substr(strlen("-XX:LongGCLogThreshold")).c_str(), 1024); + parsed->max_spins_before_thin_lock_inflation_ = ParseIntegerOrDie(option, '='); + } else if (StartsWith(option, "-XX:LongPauseLogThreshold=")) { + parsed->long_pause_log_threshold_ = MsToNs(ParseIntegerOrDie(option, '=')); + } else if (StartsWith(option, "-XX:LongGCLogThreshold=")) { + parsed->long_gc_log_threshold_ = MsToNs(ParseIntegerOrDie(option, '=')); } else if (option == "-XX:DumpGCPerformanceOnShutdown") { parsed->dump_gc_performance_on_shutdown_ = true; } else if (option == "-XX:IgnoreMaxFootprint") { @@ -608,7 +610,7 @@ Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, b } } } else if (StartsWith(option, "-XX:BackgroundGC=")) { - const std::string substring = 
option.substr(strlen("-XX:BackgroundGC=")); + const std::string substring = StringAfterChar(option, '='); gc::CollectorType collector_type = ParseCollectorType(substring); if (collector_type != gc::kCollectorTypeNone) { parsed->background_collector_type_ = collector_type; @@ -650,9 +652,9 @@ Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, b } else if (StartsWith(option, "-Xjnigreflimit:")) { // Silently ignored for backwards compatibility. } else if (StartsWith(option, "-Xlockprofthreshold:")) { - parsed->lock_profiling_threshold_ = ParseIntegerOrDie(option); + parsed->lock_profiling_threshold_ = ParseIntegerOrDie(option, ':'); } else if (StartsWith(option, "-Xstacktracefile:")) { - parsed->stack_trace_file_ = option.substr(strlen("-Xstacktracefile:")); + parsed->stack_trace_file_ = StringAfterChar(option, ':'); } else if (option == "sensitiveThread") { parsed->hook_is_sensitive_thread_ = reinterpret_cast<bool (*)()>(const_cast<void*>(options[i].second)); } else if (option == "vfprintf") { @@ -671,7 +673,7 @@ Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, b } else if (StartsWith(option, "-Xmethod-trace-file:")) { parsed->method_trace_file_ = option.substr(strlen("-Xmethod-trace-file:")); } else if (StartsWith(option, "-Xmethod-trace-file-size:")) { - parsed->method_trace_file_size_ = ParseIntegerOrDie(option); + parsed->method_trace_file_size_ = ParseIntegerOrDie(option, ':'); } else if (option == "-Xprofile:threadcpuclock") { Trace::SetDefaultClockSource(kProfilerClockSourceThreadCpu); } else if (option == "-Xprofile:wallclock") { @@ -679,18 +681,17 @@ Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, b } else if (option == "-Xprofile:dualclock") { Trace::SetDefaultClockSource(kProfilerClockSourceDual); } else if (StartsWith(option, "-Xprofile:")) { - parsed->profile_output_filename_ = option.substr(strlen("-Xprofile:")); + parsed->profile_output_filename_ = 
StringAfterChar(option, ':'); parsed->profile_ = true; } else if (StartsWith(option, "-Xprofile-period:")) { - parsed->profile_period_s_ = ParseIntegerOrDie(option); + parsed->profile_period_s_ = ParseIntegerOrDie(option, ':'); } else if (StartsWith(option, "-Xprofile-duration:")) { - parsed->profile_duration_s_ = ParseIntegerOrDie(option); + parsed->profile_duration_s_ = ParseIntegerOrDie(option, ':'); } else if (StartsWith(option, "-Xprofile-interval:")) { - parsed->profile_interval_us_ = ParseIntegerOrDie(option); + parsed->profile_interval_us_ = ParseIntegerOrDie(option, ':'); } else if (StartsWith(option, "-Xprofile-backoff:")) { - parsed->profile_backoff_coefficient_ = ParseDoubleOrDie(option, "-Xprofile-backoff:", - 1.0, 10.0, ignore_unrecognized, - parsed->profile_backoff_coefficient_); + parsed->profile_backoff_coefficient_ = ParseDoubleOrDie( + option, ':', 1.0, 10.0, ignore_unrecognized, parsed->profile_backoff_coefficient_); } else if (option == "-compiler-filter:interpret-only") { parsed->compiler_filter_ = kInterpretOnly; } else if (option == "-compiler-filter:space") { @@ -704,15 +705,15 @@ Runtime::ParsedOptions* Runtime::ParsedOptions::Create(const Options& options, b } else if (option == "-sea_ir") { parsed->sea_ir_mode_ = true; } else if (StartsWith(option, "-huge-method-max:")) { - parsed->huge_method_threshold_ = ParseIntegerOrDie(option); + parsed->huge_method_threshold_ = ParseIntegerOrDie(option, ':'); } else if (StartsWith(option, "-large-method-max:")) { - parsed->large_method_threshold_ = ParseIntegerOrDie(option); + parsed->large_method_threshold_ = ParseIntegerOrDie(option, ':'); } else if (StartsWith(option, "-small-method-max:")) { - parsed->small_method_threshold_ = ParseIntegerOrDie(option); + parsed->small_method_threshold_ = ParseIntegerOrDie(option, ':'); } else if (StartsWith(option, "-tiny-method-max:")) { - parsed->tiny_method_threshold_ = ParseIntegerOrDie(option); + parsed->tiny_method_threshold_ = ParseIntegerOrDie(option, 
':'); } else if (StartsWith(option, "-num-dex-methods-max:")) { - parsed->num_dex_methods_threshold_ = ParseIntegerOrDie(option); + parsed->num_dex_methods_threshold_ = ParseIntegerOrDie(option, ':'); } else { if (!ignore_unrecognized) { // TODO: print usage via vfprintf diff --git a/runtime/thread.cc b/runtime/thread.cc index e7fd660ae8..d195ebfe49 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -1624,6 +1624,7 @@ static const EntryPointInfo gThreadEntryPointInfo[] = { PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampoline), PORTABLE_ENTRY_POINT_INFO(pPortableToInterpreterBridge), QUICK_ENTRY_POINT_INFO(pAllocArray), + QUICK_ENTRY_POINT_INFO(pAllocArrayResolved), QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck), QUICK_ENTRY_POINT_INFO(pAllocObject), QUICK_ENTRY_POINT_INFO(pAllocObjectResolved), diff --git a/test/Android.mk b/test/Android.mk index 27834f0860..d716f9b5d3 100644 --- a/test/Android.mk +++ b/test/Android.mk @@ -71,6 +71,7 @@ define build-art-test-dex LOCAL_NO_STANDARD_LIBRARIES := true LOCAL_MODULE_PATH := $(3) LOCAL_DEX_PREOPT_IMAGE := $(TARGET_CORE_IMG_OUT) + LOCAL_DEX_PREOPT := false LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk include $(BUILD_JAVA_LIBRARY) @@ -84,6 +85,7 @@ define build-art-test-dex LOCAL_JAVA_LIBRARIES := $(HOST_CORE_JARS) LOCAL_NO_STANDARD_LIBRARIES := true LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_OUT) + LOCAL_DEX_PREOPT := false LOCAL_ADDITIONAL_DEPENDENCIES := art/build/Android.common.mk LOCAL_ADDITIONAL_DEPENDENCIES += $(LOCAL_PATH)/Android.mk include $(BUILD_HOST_DALVIK_JAVA_LIBRARY) |