diff options
216 files changed, 8098 insertions, 2230 deletions
diff --git a/Android.mk b/Android.mk index 0e86188063..f8c537816f 100644 --- a/Android.mk +++ b/Android.mk @@ -388,6 +388,7 @@ build-art-target: $(TARGET_OUT_EXECUTABLES)/art $(ART_TARGET_DEPENDENCIES) $(TAR # libstdc++ is needed when building for ART_TARGET_LINUX. ART_TARGET_SHARED_LIBRARY_BENCHMARK := $(TARGET_OUT_SHARED_LIBRARIES)/libartbenchmark.so build-art-target-golem: dex2oat dalvikvm patchoat linker libstdc++ \ + $(TARGET_OUT_EXECUTABLES)/art \ $(TARGET_OUT)/etc/public.libraries.txt \ $(ART_TARGET_DEX_DEPENDENCIES) \ $(ART_TARGET_SHARED_LIBRARY_DEPENDENCIES) \ diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk index 1591e34885..1876efcd58 100644 --- a/build/Android.common_test.mk +++ b/build/Android.common_test.mk @@ -54,11 +54,11 @@ ART_TEST_FULL ?= false ART_TEST_QUIET ?= true # Do you want interpreter tests run? -ART_TEST_INTERPRETER ?= $(ART_TEST_FULL) -ART_TEST_INTERPRETER_ACCESS_CHECKS ?= $(ART_TEST_FULL) +ART_TEST_INTERPRETER ?= true +ART_TEST_INTERPRETER_ACCESS_CHECKS ?= true # Do you want JIT tests run? -ART_TEST_JIT ?= $(ART_TEST_FULL) +ART_TEST_JIT ?= true # Do you want optimizing compiler tests run? 
ART_TEST_OPTIMIZING ?= true @@ -215,6 +215,7 @@ define build-art-test-dex LOCAL_MODULE_PATH := $(3) LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT) ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),) + LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp endif include $(BUILD_JAVA_LIBRARY) @@ -230,6 +231,7 @@ define build-art-test-dex LOCAL_JAVA_LIBRARIES := $(HOST_CORE_JARS) LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION) ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),) + LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp endif include $(BUILD_HOST_DALVIK_JAVA_LIBRARY) diff --git a/build/Android.cpplint.mk b/build/Android.cpplint.mk index d09f2902db..f924a855b7 100644 --- a/build/Android.cpplint.mk +++ b/build/Android.cpplint.mk @@ -21,7 +21,7 @@ ART_CPPLINT_FILTER := --filter=-whitespace/line_length,-build/include,-readabili ART_CPPLINT_FLAGS := --quiet --root=$(ANDROID_BUILD_TOP) ART_CPPLINT_INGORED := \ runtime/elf.h \ - runtime/openjdkjvmti/jvmti.h + runtime/openjdkjvmti/include/jvmti.h # This: # 1) Gets a list of all .h & .cc files in the art directory. 
diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc index 550e8c4605..5b331bcbec 100644 --- a/cmdline/cmdline_parser_test.cc +++ b/cmdline/cmdline_parser_test.cc @@ -476,7 +476,7 @@ TEST_F(CmdlineParserTest, TestJitOptions) { * -Xps-* */ TEST_F(CmdlineParserTest, ProfileSaverOptions) { - ProfileSaverOptions opt = ProfileSaverOptions(true, 1, 2, 3, 4, 5, 6, 7); + ProfileSaverOptions opt = ProfileSaverOptions(true, 1, 2, 3, 4, 5, 6, 7, "abc"); EXPECT_SINGLE_PARSE_VALUE(opt, "-Xjitsaveprofilinginfo " @@ -486,7 +486,8 @@ TEST_F(CmdlineParserTest, ProfileSaverOptions) { "-Xps-min-methods-to-save:4 " "-Xps-min-classes-to-save:5 " "-Xps-min-notification-before-wake:6 " - "-Xps-max-notification-before-wake:7", + "-Xps-max-notification-before-wake:7 " + "-Xps-profile-path:abc", M::ProfileSaverOpts); } // TEST_F diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h index f1123eb692..71c4e95921 100644 --- a/cmdline/cmdline_types.h +++ b/cmdline/cmdline_types.h @@ -752,9 +752,13 @@ struct CmdlineType<ProfileSaverOptions> : CmdlineTypeParser<ProfileSaverOptions> return ParseInto(existing, &ProfileSaverOptions::max_notification_before_wake_, type_parser.Parse(suffix)); - } else { - return Result::Failure(std::string("Invalid suboption '") + option + "'"); } + if (android::base::StartsWith(option, "profile-path:")) { + existing.profile_path_ = suffix; + return Result::SuccessNoValue(); + } + + return Result::Failure(std::string("Invalid suboption '") + option + "'"); } static const char* Name() { return "ProfileSaverOptions"; } @@ -774,6 +778,5 @@ struct CmdlineType<ExperimentalFlags> : CmdlineTypeParser<ExperimentalFlags> { static const char* Name() { return "ExperimentalFlags"; } }; - } // namespace art #endif // ART_CMDLINE_CMDLINE_TYPES_H_ diff --git a/compiler/Android.bp b/compiler/Android.bp index c59e36b597..d57f301ff9 100644 --- a/compiler/Android.bp +++ b/compiler/Android.bp @@ -351,6 +351,7 @@ art_cc_test { 
"optimizing/pretty_printer_test.cc", "optimizing/reference_type_propagation_test.cc", "optimizing/side_effects_test.cc", + "optimizing/ssa_liveness_analysis_test.cc", "optimizing/ssa_test.cc", "optimizing/stack_map_test.cc", "optimizing/suspend_check_test.cc", diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc index 9a45379a05..8b3029261f 100644 --- a/compiler/common_compiler_test.cc +++ b/compiler/common_compiler_test.cc @@ -55,11 +55,17 @@ void CommonCompilerTest::MakeExecutable(ArtMethod* method) { // If the code size is 0 it means the method was skipped due to profile guided compilation. if (compiled_method != nullptr && compiled_method->GetQuickCode().size() != 0u) { ArrayRef<const uint8_t> code = compiled_method->GetQuickCode(); - uint32_t code_size = code.size(); + const uint32_t code_size = code.size(); ArrayRef<const uint8_t> vmap_table = compiled_method->GetVmapTable(); - uint32_t vmap_table_offset = vmap_table.empty() ? 0u + const uint32_t vmap_table_offset = vmap_table.empty() ? 0u : sizeof(OatQuickMethodHeader) + vmap_table.size(); + // The method info is directly before the vmap table. + ArrayRef<const uint8_t> method_info = compiled_method->GetMethodInfo(); + const uint32_t method_info_offset = method_info.empty() ? 
0u + : vmap_table_offset + method_info.size(); + OatQuickMethodHeader method_header(vmap_table_offset, + method_info_offset, compiled_method->GetFrameSizeInBytes(), compiled_method->GetCoreSpillMask(), compiled_method->GetFpSpillMask(), @@ -68,11 +74,12 @@ void CommonCompilerTest::MakeExecutable(ArtMethod* method) { header_code_and_maps_chunks_.push_back(std::vector<uint8_t>()); std::vector<uint8_t>* chunk = &header_code_and_maps_chunks_.back(); const size_t max_padding = GetInstructionSetAlignment(compiled_method->GetInstructionSet()); - const size_t size = vmap_table.size() + sizeof(method_header) + code_size; + const size_t size = method_info.size() + vmap_table.size() + sizeof(method_header) + code_size; chunk->reserve(size + max_padding); chunk->resize(sizeof(method_header)); memcpy(&(*chunk)[0], &method_header, sizeof(method_header)); chunk->insert(chunk->begin(), vmap_table.begin(), vmap_table.end()); + chunk->insert(chunk->begin(), method_info.begin(), method_info.end()); chunk->insert(chunk->end(), code.begin(), code.end()); CHECK_EQ(chunk->size(), size); const void* unaligned_code_ptr = chunk->data() + (size - code_size); diff --git a/compiler/compiled_method.cc b/compiler/compiled_method.cc index f06d90c81c..0d9021fcfb 100644 --- a/compiler/compiled_method.cc +++ b/compiler/compiled_method.cc @@ -105,15 +105,15 @@ CompiledMethod::CompiledMethod(CompilerDriver* driver, const size_t frame_size_in_bytes, const uint32_t core_spill_mask, const uint32_t fp_spill_mask, - const ArrayRef<const SrcMapElem>& src_mapping_table, + const ArrayRef<const uint8_t>& method_info, const ArrayRef<const uint8_t>& vmap_table, const ArrayRef<const uint8_t>& cfi_info, const ArrayRef<const LinkerPatch>& patches) : CompiledCode(driver, instruction_set, quick_code), - frame_size_in_bytes_(frame_size_in_bytes), core_spill_mask_(core_spill_mask), + frame_size_in_bytes_(frame_size_in_bytes), + core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask), - src_mapping_table_( - 
driver->GetCompiledMethodStorage()->DeduplicateSrcMappingTable(src_mapping_table)), + method_info_(driver->GetCompiledMethodStorage()->DeduplicateMethodInfo(method_info)), vmap_table_(driver->GetCompiledMethodStorage()->DeduplicateVMapTable(vmap_table)), cfi_info_(driver->GetCompiledMethodStorage()->DeduplicateCFIInfo(cfi_info)), patches_(driver->GetCompiledMethodStorage()->DeduplicateLinkerPatches(patches)) { @@ -126,7 +126,7 @@ CompiledMethod* CompiledMethod::SwapAllocCompiledMethod( const size_t frame_size_in_bytes, const uint32_t core_spill_mask, const uint32_t fp_spill_mask, - const ArrayRef<const SrcMapElem>& src_mapping_table, + const ArrayRef<const uint8_t>& method_info, const ArrayRef<const uint8_t>& vmap_table, const ArrayRef<const uint8_t>& cfi_info, const ArrayRef<const LinkerPatch>& patches) { @@ -139,7 +139,7 @@ CompiledMethod* CompiledMethod::SwapAllocCompiledMethod( frame_size_in_bytes, core_spill_mask, fp_spill_mask, - src_mapping_table, + method_info, vmap_table, cfi_info, patches); return ret; @@ -156,7 +156,7 @@ CompiledMethod::~CompiledMethod() { storage->ReleaseLinkerPatches(patches_); storage->ReleaseCFIInfo(cfi_info_); storage->ReleaseVMapTable(vmap_table_); - storage->ReleaseSrcMappingTable(src_mapping_table_); + storage->ReleaseMethodInfo(method_info_); } } // namespace art diff --git a/compiler/compiled_method.h b/compiler/compiled_method.h index 00e2d62bff..aa529f8352 100644 --- a/compiler/compiled_method.h +++ b/compiler/compiled_method.h @@ -109,57 +109,6 @@ inline bool operator==(const SrcMapElem& lhs, const SrcMapElem& rhs) { return lhs.from_ == rhs.from_ && lhs.to_ == rhs.to_; } -template <class Allocator> -class SrcMap FINAL : public std::vector<SrcMapElem, Allocator> { - public: - using std::vector<SrcMapElem, Allocator>::begin; - using typename std::vector<SrcMapElem, Allocator>::const_iterator; - using std::vector<SrcMapElem, Allocator>::empty; - using std::vector<SrcMapElem, Allocator>::end; - using std::vector<SrcMapElem, 
Allocator>::resize; - using std::vector<SrcMapElem, Allocator>::shrink_to_fit; - using std::vector<SrcMapElem, Allocator>::size; - - explicit SrcMap() {} - explicit SrcMap(const Allocator& alloc) : std::vector<SrcMapElem, Allocator>(alloc) {} - - template <class InputIt> - SrcMap(InputIt first, InputIt last, const Allocator& alloc) - : std::vector<SrcMapElem, Allocator>(first, last, alloc) {} - - void push_back(const SrcMapElem& elem) { - if (!empty()) { - // Check that the addresses are inserted in sorted order. - DCHECK_GE(elem.from_, this->back().from_); - // If two consequitive entries map to the same value, ignore the later. - // E.g. for map {{0, 1}, {4, 1}, {8, 2}}, all values in [0,8) map to 1. - if (elem.to_ == this->back().to_) { - return; - } - } - std::vector<SrcMapElem, Allocator>::push_back(elem); - } - - // Returns true and the corresponding "to" value if the mapping is found. - // Oterwise returns false and 0. - std::pair<bool, int32_t> Find(uint32_t from) const { - // Finds first mapping such that lb.from_ >= from. - auto lb = std::lower_bound(begin(), end(), SrcMapElem {from, INT32_MIN}); - if (lb != end() && lb->from_ == from) { - // Found exact match. - return std::make_pair(true, lb->to_); - } else if (lb != begin()) { - // The previous mapping is still in effect. - return std::make_pair(true, (--lb)->to_); - } else { - // Not found because 'from' is smaller than first entry in the map. 
- return std::make_pair(false, 0); - } - } -}; - -using DefaultSrcMap = SrcMap<std::allocator<SrcMapElem>>; - class LinkerPatch { public: // Note: We explicitly specify the underlying type of the enum because GCC @@ -420,7 +369,7 @@ class CompiledMethod FINAL : public CompiledCode { const size_t frame_size_in_bytes, const uint32_t core_spill_mask, const uint32_t fp_spill_mask, - const ArrayRef<const SrcMapElem>& src_mapping_table, + const ArrayRef<const uint8_t>& method_info, const ArrayRef<const uint8_t>& vmap_table, const ArrayRef<const uint8_t>& cfi_info, const ArrayRef<const LinkerPatch>& patches); @@ -434,7 +383,7 @@ class CompiledMethod FINAL : public CompiledCode { const size_t frame_size_in_bytes, const uint32_t core_spill_mask, const uint32_t fp_spill_mask, - const ArrayRef<const SrcMapElem>& src_mapping_table, + const ArrayRef<const uint8_t>& method_info, const ArrayRef<const uint8_t>& vmap_table, const ArrayRef<const uint8_t>& cfi_info, const ArrayRef<const LinkerPatch>& patches); @@ -453,8 +402,8 @@ class CompiledMethod FINAL : public CompiledCode { return fp_spill_mask_; } - ArrayRef<const SrcMapElem> GetSrcMappingTable() const { - return GetArray(src_mapping_table_); + ArrayRef<const uint8_t> GetMethodInfo() const { + return GetArray(method_info_); } ArrayRef<const uint8_t> GetVmapTable() const { @@ -476,9 +425,9 @@ class CompiledMethod FINAL : public CompiledCode { const uint32_t core_spill_mask_; // For quick code, a bit mask describing spilled FPR callee-save registers. const uint32_t fp_spill_mask_; - // For quick code, a set of pairs (PC, DEX) mapping from native PC offset to DEX offset. - const LengthPrefixedArray<SrcMapElem>* const src_mapping_table_; - // For quick code, a uleb128 encoded map from GPR/FPR register to dex register. Size prefixed. + // For quick code, method specific information that is not very dedupe friendly (method indices). 
+ const LengthPrefixedArray<uint8_t>* const method_info_; + // For quick code, holds code infos which contain stack maps, inline information, and etc. const LengthPrefixedArray<uint8_t>* const vmap_table_; // For quick code, a FDE entry for the debug_frame section. const LengthPrefixedArray<uint8_t>* const cfi_info_; diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc index 76aeaa55d7..808e28c9ea 100644 --- a/compiler/dex/dex_to_dex_compiler.cc +++ b/compiler/dex/dex_to_dex_compiler.cc @@ -370,7 +370,7 @@ CompiledMethod* ArtCompileDEX( 0, 0, 0, - ArrayRef<const SrcMapElem>(), // src_mapping_table + ArrayRef<const uint8_t>(), // method_info ArrayRef<const uint8_t>(builder.GetData()), // vmap_table ArrayRef<const uint8_t>(), // cfi data ArrayRef<const LinkerPatch>()); diff --git a/compiler/driver/compiled_method_storage.cc b/compiler/driver/compiled_method_storage.cc index a0a8f81c1f..e6a47ba60f 100644 --- a/compiler/driver/compiled_method_storage.cc +++ b/compiler/driver/compiled_method_storage.cc @@ -172,8 +172,8 @@ CompiledMethodStorage::CompiledMethodStorage(int swap_fd) : swap_space_(swap_fd == -1 ? 
nullptr : new SwapSpace(swap_fd, 10 * MB)), dedupe_enabled_(true), dedupe_code_("dedupe code", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())), - dedupe_src_mapping_table_("dedupe source mapping table", - LengthPrefixedArrayAlloc<SrcMapElem>(swap_space_.get())), + dedupe_method_info_("dedupe method info", + LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())), dedupe_vmap_table_("dedupe vmap table", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())), dedupe_cfi_info_("dedupe cfi info", LengthPrefixedArrayAlloc<uint8_t>(swap_space_.get())), @@ -207,13 +207,13 @@ void CompiledMethodStorage::ReleaseCode(const LengthPrefixedArray<uint8_t>* code ReleaseArrayIfNotDeduplicated(code); } -const LengthPrefixedArray<SrcMapElem>* CompiledMethodStorage::DeduplicateSrcMappingTable( - const ArrayRef<const SrcMapElem>& src_map) { - return AllocateOrDeduplicateArray(src_map, &dedupe_src_mapping_table_); +const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateMethodInfo( + const ArrayRef<const uint8_t>& src_map) { + return AllocateOrDeduplicateArray(src_map, &dedupe_method_info_); } -void CompiledMethodStorage::ReleaseSrcMappingTable(const LengthPrefixedArray<SrcMapElem>* src_map) { - ReleaseArrayIfNotDeduplicated(src_map); +void CompiledMethodStorage::ReleaseMethodInfo(const LengthPrefixedArray<uint8_t>* method_info) { + ReleaseArrayIfNotDeduplicated(method_info); } const LengthPrefixedArray<uint8_t>* CompiledMethodStorage::DeduplicateVMapTable( diff --git a/compiler/driver/compiled_method_storage.h b/compiler/driver/compiled_method_storage.h index 124b5a6e25..27011e8955 100644 --- a/compiler/driver/compiled_method_storage.h +++ b/compiler/driver/compiled_method_storage.h @@ -29,7 +29,6 @@ namespace art { class LinkerPatch; -class SrcMapElem; class CompiledMethodStorage { public: @@ -52,9 +51,9 @@ class CompiledMethodStorage { const LengthPrefixedArray<uint8_t>* DeduplicateCode(const ArrayRef<const uint8_t>& code); void ReleaseCode(const 
LengthPrefixedArray<uint8_t>* code); - const LengthPrefixedArray<SrcMapElem>* DeduplicateSrcMappingTable( - const ArrayRef<const SrcMapElem>& src_map); - void ReleaseSrcMappingTable(const LengthPrefixedArray<SrcMapElem>* src_map); + const LengthPrefixedArray<uint8_t>* DeduplicateMethodInfo( + const ArrayRef<const uint8_t>& method_info); + void ReleaseMethodInfo(const LengthPrefixedArray<uint8_t>* method_info); const LengthPrefixedArray<uint8_t>* DeduplicateVMapTable(const ArrayRef<const uint8_t>& table); void ReleaseVMapTable(const LengthPrefixedArray<uint8_t>* table); @@ -96,7 +95,7 @@ class CompiledMethodStorage { bool dedupe_enabled_; ArrayDedupeSet<uint8_t> dedupe_code_; - ArrayDedupeSet<SrcMapElem> dedupe_src_mapping_table_; + ArrayDedupeSet<uint8_t> dedupe_method_info_; ArrayDedupeSet<uint8_t> dedupe_vmap_table_; ArrayDedupeSet<uint8_t> dedupe_cfi_info_; ArrayDedupeSet<LinkerPatch> dedupe_linker_patches_; diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc index b72d0acb8e..6572d170e6 100644 --- a/compiler/driver/compiled_method_storage_test.cc +++ b/compiler/driver/compiled_method_storage_test.cc @@ -51,11 +51,11 @@ TEST(CompiledMethodStorage, Deduplicate) { ArrayRef<const uint8_t>(raw_code1), ArrayRef<const uint8_t>(raw_code2), }; - const SrcMapElem raw_src_map1[] = { { 1u, 2u }, { 3u, 4u }, { 5u, 6u } }; - const SrcMapElem raw_src_map2[] = { { 8u, 7u }, { 6u, 5u }, { 4u, 3u }, { 2u, 1u } }; - ArrayRef<const SrcMapElem> src_map[] = { - ArrayRef<const SrcMapElem>(raw_src_map1), - ArrayRef<const SrcMapElem>(raw_src_map2), + const uint8_t raw_method_info_map1[] = { 1u, 2u, 3u, 4u, 5u, 6u }; + const uint8_t raw_method_info_map2[] = { 8u, 7u, 6u, 5u, 4u, 3u, 2u, 1u }; + ArrayRef<const uint8_t> method_info[] = { + ArrayRef<const uint8_t>(raw_method_info_map1), + ArrayRef<const uint8_t>(raw_method_info_map2), }; const uint8_t raw_vmap_table1[] = { 2, 4, 6 }; const uint8_t raw_vmap_table2[] = { 7, 5, 3, 1 
}; @@ -85,7 +85,7 @@ TEST(CompiledMethodStorage, Deduplicate) { std::vector<CompiledMethod*> compiled_methods; compiled_methods.reserve(1u << 7); for (auto&& c : code) { - for (auto&& s : src_map) { + for (auto&& s : method_info) { for (auto&& v : vmap_table) { for (auto&& f : cfi_info) { for (auto&& p : patches) { @@ -113,7 +113,7 @@ TEST(CompiledMethodStorage, Deduplicate) { bool same_patches = ((i ^ j) & patches_bit) == 0u; ASSERT_EQ(same_code, lhs->GetQuickCode().data() == rhs->GetQuickCode().data()) << i << " " << j; - ASSERT_EQ(same_src_map, lhs->GetSrcMappingTable().data() == rhs->GetSrcMappingTable().data()) + ASSERT_EQ(same_src_map, lhs->GetMethodInfo().data() == rhs->GetMethodInfo().data()) << i << " " << j; ASSERT_EQ(same_vmap_table, lhs->GetVmapTable().data() == rhs->GetVmapTable().data()) << i << " " << j; diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index cbde587241..874e35716c 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -65,8 +65,6 @@ class InstructionSetFeatures; class ParallelCompilationManager; class ScopedObjectAccess; template <class Allocator> class SrcMap; -class SrcMapElem; -using SwapSrcMap = SrcMap<SwapAllocator<SrcMapElem>>; template<class T> class Handle; class TimingLogger; class VdexFile; diff --git a/compiler/exception_test.cc b/compiler/exception_test.cc index eac46e5909..c975944a04 100644 --- a/compiler/exception_test.cc +++ b/compiler/exception_test.cc @@ -74,8 +74,8 @@ class ExceptionTest : public CommonRuntimeTest { fake_header_code_and_maps_.resize(stack_maps_offset + fake_code_.size()); MemoryRegion stack_maps_region(&fake_header_code_and_maps_[0], stack_maps_size); - stack_maps.FillIn(stack_maps_region); - OatQuickMethodHeader method_header(stack_maps_offset, 4 * sizeof(void*), 0u, 0u, code_size); + stack_maps.FillInCodeInfo(stack_maps_region); + OatQuickMethodHeader method_header(stack_maps_offset, 0u, 4 * sizeof(void*), 0u, 0u, code_size); 
memcpy(&fake_header_code_and_maps_[stack_maps_size], &method_header, sizeof(method_header)); std::copy(fake_code_.begin(), fake_code_.end(), diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index a4a1fd36ce..aefdb548ff 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -976,9 +976,6 @@ void ImageWriter::PruneNonImageClasses() { dex_cache->ClearResolvedField(pair.index, target_ptr_size_); } } - // Clean the dex field. It might have been populated during the initialization phase, but - // contains data only valid during a real run. - dex_cache->SetFieldObject<false>(mirror::DexCache::DexOffset(), nullptr); } // Drop the array class cache in the ClassLinker, as these are roots holding those classes live. @@ -1578,10 +1575,8 @@ void ImageWriter::CalculateNewObjectOffsets() { } // Calculate the size of the class table. ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); - CHECK_EQ(class_loaders_.size(), compile_app_image_ ? 1u : 0u); - mirror::ClassLoader* class_loader = compile_app_image_ ? *class_loaders_.begin() : nullptr; - DCHECK_EQ(image_info.class_table_->NumZygoteClasses(class_loader), 0u); - if (image_info.class_table_->NumNonZygoteClasses(class_loader) != 0u) { + DCHECK_EQ(image_info.class_table_->NumReferencedZygoteClasses(), 0u); + if (image_info.class_table_->NumReferencedNonZygoteClasses() != 0u) { image_info.class_table_bytes_ += image_info.class_table_->WriteToMemory(nullptr); } } @@ -1926,9 +1921,8 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) { // above comment for intern tables. 
ClassTable temp_class_table; temp_class_table.ReadFromMemory(class_table_memory_ptr); - ObjPtr<mirror::ClassLoader> class_loader = GetClassLoader(); - CHECK_EQ(temp_class_table.NumZygoteClasses(class_loader), - table->NumNonZygoteClasses(class_loader) + table->NumZygoteClasses(class_loader)); + CHECK_EQ(temp_class_table.NumReferencedZygoteClasses(), + table->NumReferencedNonZygoteClasses() + table->NumReferencedZygoteClasses()); UnbufferedRootVisitor visitor(&root_visitor, RootInfo(kRootUnknown)); temp_class_table.VisitRoots(visitor); } diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc index 3bd290da17..68ec7bd860 100644 --- a/compiler/jni/quick/jni_compiler.cc +++ b/compiler/jni/quick/jni_compiler.cc @@ -660,8 +660,8 @@ static CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, frame_size, main_jni_conv->CoreSpillMask(), main_jni_conv->FpSpillMask(), - ArrayRef<const SrcMapElem>(), - ArrayRef<const uint8_t>(), // vmap_table. + /* method_info */ ArrayRef<const uint8_t>(), + /* vmap_table */ ArrayRef<const uint8_t>(), ArrayRef<const uint8_t>(*jni_asm->cfi().data()), ArrayRef<const LinkerPatch>()); } diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h index 233daf4a39..908cb412bf 100644 --- a/compiler/linker/relative_patcher_test.h +++ b/compiler/linker/relative_patcher_test.h @@ -87,7 +87,7 @@ class RelativePatcherTest : public testing::Test { /* frame_size_in_bytes */ 0u, /* core_spill_mask */ 0u, /* fp_spill_mask */ 0u, - /* src_mapping_table */ ArrayRef<const SrcMapElem>(), + /* method_info */ ArrayRef<const uint8_t>(), /* vmap_table */ ArrayRef<const uint8_t>(), /* cfi_info */ ArrayRef<const uint8_t>(), patches)); diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index 97b13746fc..ead41240c2 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -485,7 +485,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) { // it is time to update OatHeader::kOatVersion 
EXPECT_EQ(72U, sizeof(OatHeader)); EXPECT_EQ(4U, sizeof(OatMethodOffsets)); - EXPECT_EQ(20U, sizeof(OatQuickMethodHeader)); + EXPECT_EQ(24U, sizeof(OatQuickMethodHeader)); EXPECT_EQ(161 * static_cast<size_t>(GetInstructionSetPointerSize(kRuntimeISA)), sizeof(QuickEntryPoints)); } diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index 5812bd75a5..8e25aa3421 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -326,6 +326,7 @@ OatWriter::OatWriter(bool compiling_boot_image, TimingLogger* timings, ProfileCo size_relative_call_thunks_(0), size_misc_thunks_(0), size_vmap_table_(0), + size_method_info_(0), size_oat_dex_file_location_size_(0), size_oat_dex_file_location_data_(0), size_oat_dex_file_location_checksum_(0), @@ -809,6 +810,7 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { DCHECK_LT(method_offsets_index_, oat_class->method_headers_.size()); OatQuickMethodHeader* method_header = &oat_class->method_headers_[method_offsets_index_]; uint32_t vmap_table_offset = method_header->GetVmapTableOffset(); + uint32_t method_info_offset = method_header->GetMethodInfoOffset(); // The code offset was 0 when the mapping/vmap table offset was set, so it's set // to 0-offset and we need to adjust it by code_offset. uint32_t code_offset = quick_code_offset - thumb_offset; @@ -819,13 +821,18 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { vmap_table_offset += code_offset; DCHECK_LT(vmap_table_offset, code_offset); } + if (method_info_offset != 0u) { + method_info_offset += code_offset; + DCHECK_LT(method_info_offset, code_offset); + } } else { + CHECK(compiled_method->GetMethodInfo().empty()); if (kIsVdexEnabled) { // We write the offset in the .vdex file. 
DCHECK_EQ(vmap_table_offset, 0u); vmap_table_offset = current_quickening_info_offset_; - ArrayRef<const uint8_t> map = compiled_method->GetVmapTable(); - current_quickening_info_offset_ += map.size() * sizeof(map.front()); + ArrayRef<const uint8_t> vmap_table = compiled_method->GetVmapTable(); + current_quickening_info_offset_ += vmap_table.size() * sizeof(vmap_table.front()); } else { // We write the offset of the quickening info relative to the code. vmap_table_offset += code_offset; @@ -836,6 +843,7 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { uint32_t core_spill_mask = compiled_method->GetCoreSpillMask(); uint32_t fp_spill_mask = compiled_method->GetFpSpillMask(); *method_header = OatQuickMethodHeader(vmap_table_offset, + method_info_offset, frame_size_in_bytes, core_spill_mask, fp_spill_mask, @@ -909,6 +917,9 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { if (UNLIKELY(lhs->GetVmapTable().data() != rhs->GetVmapTable().data())) { return lhs->GetVmapTable().data() < rhs->GetVmapTable().data(); } + if (UNLIKELY(lhs->GetMethodInfo().data() != rhs->GetMethodInfo().data())) { + return lhs->GetMethodInfo().data() < rhs->GetMethodInfo().data(); + } if (UNLIKELY(lhs->GetPatches().data() != rhs->GetPatches().data())) { return lhs->GetPatches().data() < rhs->GetPatches().data(); } @@ -983,6 +994,44 @@ class OatWriter::InitMapMethodVisitor : public OatDexMethodVisitor { SafeMap<const uint8_t*, uint32_t> dedupe_map_; }; +class OatWriter::InitMethodInfoVisitor : public OatDexMethodVisitor { + public: + InitMethodInfoVisitor(OatWriter* writer, size_t offset) : OatDexMethodVisitor(writer, offset) {} + + bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it ATTRIBUTE_UNUSED) + REQUIRES_SHARED(Locks::mutator_lock_) { + OatClass* oat_class = &writer_->oat_classes_[oat_class_index_]; + CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index); + + if (compiled_method != 
nullptr) { + DCHECK_LT(method_offsets_index_, oat_class->method_offsets_.size()); + DCHECK_EQ(oat_class->method_headers_[method_offsets_index_].GetMethodInfoOffset(), 0u); + ArrayRef<const uint8_t> map = compiled_method->GetMethodInfo(); + const uint32_t map_size = map.size() * sizeof(map[0]); + if (map_size != 0u) { + size_t offset = dedupe_map_.GetOrCreate( + map.data(), + [this, map_size]() { + uint32_t new_offset = offset_; + offset_ += map_size; + return new_offset; + }); + // Code offset is not initialized yet, so set the map offset to 0u-offset. + DCHECK_EQ(oat_class->method_offsets_[method_offsets_index_].code_offset_, 0u); + oat_class->method_headers_[method_offsets_index_].SetMethodInfoOffset(0u - offset); + } + ++method_offsets_index_; + } + + return true; + } + + private: + // Deduplication is already done on a pointer basis by the compiler driver, + // so we can simply compare the pointers to find out if things are duplicated. + SafeMap<const uint8_t*, uint32_t> dedupe_map_; +}; + class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor { public: InitImageMethodVisitor(OatWriter* writer, @@ -1509,7 +1558,7 @@ class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor { OatClass* oat_class = &writer_->oat_classes_[oat_class_index_]; const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index); - if (compiled_method != nullptr) { // ie. not an abstract method + if (compiled_method != nullptr) { // i.e. 
not an abstract method size_t file_offset = file_offset_; OutputStream* out = out_; @@ -1558,6 +1607,63 @@ class OatWriter::WriteMapMethodVisitor : public OatDexMethodVisitor { } }; +class OatWriter::WriteMethodInfoVisitor : public OatDexMethodVisitor { + public: + WriteMethodInfoVisitor(OatWriter* writer, + OutputStream* out, + const size_t file_offset, + size_t relative_offset) + : OatDexMethodVisitor(writer, relative_offset), + out_(out), + file_offset_(file_offset) {} + + bool VisitMethod(size_t class_def_method_index, const ClassDataItemIterator& it) { + OatClass* oat_class = &writer_->oat_classes_[oat_class_index_]; + const CompiledMethod* compiled_method = oat_class->GetCompiledMethod(class_def_method_index); + + if (compiled_method != nullptr) { // i.e. not an abstract method + size_t file_offset = file_offset_; + OutputStream* out = out_; + uint32_t map_offset = oat_class->method_headers_[method_offsets_index_].GetMethodInfoOffset(); + uint32_t code_offset = oat_class->method_offsets_[method_offsets_index_].code_offset_; + ++method_offsets_index_; + DCHECK((compiled_method->GetMethodInfo().size() == 0u && map_offset == 0u) || + (compiled_method->GetMethodInfo().size() != 0u && map_offset != 0u)) + << compiled_method->GetMethodInfo().size() << " " << map_offset << " " + << dex_file_->PrettyMethod(it.GetMemberIndex()); + if (map_offset != 0u) { + // Transform map_offset to actual oat data offset. + map_offset = (code_offset - compiled_method->CodeDelta()) - map_offset; + DCHECK_NE(map_offset, 0u); + DCHECK_LE(map_offset, offset_) << dex_file_->PrettyMethod(it.GetMemberIndex()); + + ArrayRef<const uint8_t> map = compiled_method->GetMethodInfo(); + size_t map_size = map.size() * sizeof(map[0]); + if (map_offset == offset_) { + // Write deduplicated map (code info for Optimizing or transformation info for dex2dex). 
+ if (UNLIKELY(!out->WriteFully(map.data(), map_size))) { + ReportWriteFailure(it); + return false; + } + offset_ += map_size; + } + } + DCHECK_OFFSET_(); + } + + return true; + } + + private: + OutputStream* const out_; + size_t const file_offset_; + + void ReportWriteFailure(const ClassDataItemIterator& it) { + PLOG(ERROR) << "Failed to write map for " + << dex_file_->PrettyMethod(it.GetMemberIndex()) << " to " << out_->GetLocation(); + } +}; + // Visit all methods from all classes in all dex files with the specified visitor. bool OatWriter::VisitDexMethods(DexMethodVisitor* visitor) { for (const DexFile* dex_file : *dex_files_) { @@ -1651,11 +1757,18 @@ size_t OatWriter::InitOatMaps(size_t offset) { if (!compiler_driver_->GetCompilerOptions().IsAnyMethodCompilationEnabled()) { return offset; } - InitMapMethodVisitor visitor(this, offset); - bool success = VisitDexMethods(&visitor); - DCHECK(success); - offset = visitor.GetOffset(); - + { + InitMapMethodVisitor visitor(this, offset); + bool success = VisitDexMethods(&visitor); + DCHECK(success); + offset = visitor.GetOffset(); + } + { + InitMethodInfoVisitor visitor(this, offset); + bool success = VisitDexMethods(&visitor); + DCHECK(success); + offset = visitor.GetOffset(); + } return offset; } @@ -1996,6 +2109,7 @@ bool OatWriter::WriteCode(OutputStream* out) { DO_STAT(size_relative_call_thunks_); DO_STAT(size_misc_thunks_); DO_STAT(size_vmap_table_); + DO_STAT(size_method_info_); DO_STAT(size_oat_dex_file_location_size_); DO_STAT(size_oat_dex_file_location_data_); DO_STAT(size_oat_dex_file_location_checksum_); @@ -2111,13 +2225,24 @@ bool OatWriter::WriteClasses(OutputStream* out) { } size_t OatWriter::WriteMaps(OutputStream* out, const size_t file_offset, size_t relative_offset) { - size_t vmap_tables_offset = relative_offset; - WriteMapMethodVisitor visitor(this, out, file_offset, relative_offset); - if (UNLIKELY(!VisitDexMethods(&visitor))) { - return 0; + { + size_t vmap_tables_offset = relative_offset; + 
WriteMapMethodVisitor visitor(this, out, file_offset, relative_offset); + if (UNLIKELY(!VisitDexMethods(&visitor))) { + return 0; + } + relative_offset = visitor.GetOffset(); + size_vmap_table_ = relative_offset - vmap_tables_offset; + } + { + size_t method_infos_offset = relative_offset; + WriteMethodInfoVisitor visitor(this, out, file_offset, relative_offset); + if (UNLIKELY(!VisitDexMethods(&visitor))) { + return 0; + } + relative_offset = visitor.GetOffset(); + size_method_info_ = relative_offset - method_infos_offset; } - relative_offset = visitor.GetOffset(); - size_vmap_table_ = relative_offset - vmap_tables_offset; return relative_offset; } diff --git a/compiler/oat_writer.h b/compiler/oat_writer.h index 511371480a..e778f75551 100644 --- a/compiler/oat_writer.h +++ b/compiler/oat_writer.h @@ -254,9 +254,11 @@ class OatWriter { class InitOatClassesMethodVisitor; class InitCodeMethodVisitor; class InitMapMethodVisitor; + class InitMethodInfoVisitor; class InitImageMethodVisitor; class WriteCodeMethodVisitor; class WriteMapMethodVisitor; + class WriteMethodInfoVisitor; class WriteQuickeningInfoMethodVisitor; // Visit all the methods in all the compiled dex files in their definition order @@ -425,6 +427,7 @@ class OatWriter { uint32_t size_relative_call_thunks_; uint32_t size_misc_thunks_; uint32_t size_vmap_table_; + uint32_t size_method_info_; uint32_t size_oat_dex_file_location_size_; uint32_t size_oat_dex_file_location_data_; uint32_t size_oat_dex_file_location_checksum_; diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc index 424b8507fb..b7c80756b0 100644 --- a/compiler/optimizing/code_generator.cc +++ b/compiler/optimizing/code_generator.cc @@ -654,8 +654,12 @@ std::unique_ptr<CodeGenerator> CodeGenerator::Create(HGraph* graph, } } -size_t CodeGenerator::ComputeStackMapsSize() { - return stack_map_stream_.PrepareForFillIn(); +void CodeGenerator::ComputeStackMapAndMethodInfoSize(size_t* stack_map_size, + size_t* 
method_info_size) { + DCHECK(stack_map_size != nullptr); + DCHECK(method_info_size != nullptr); + *stack_map_size = stack_map_stream_.PrepareForFillIn(); + *method_info_size = stack_map_stream_.ComputeMethodInfoSize(); } static void CheckCovers(uint32_t dex_pc, @@ -723,10 +727,13 @@ static void CheckLoopEntriesCanBeUsedForOsr(const HGraph& graph, } } -void CodeGenerator::BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item) { - stack_map_stream_.FillIn(region); +void CodeGenerator::BuildStackMaps(MemoryRegion stack_map_region, + MemoryRegion method_info_region, + const DexFile::CodeItem& code_item) { + stack_map_stream_.FillInCodeInfo(stack_map_region); + stack_map_stream_.FillInMethodInfo(method_info_region); if (kIsDebugBuild) { - CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(region), code_item); + CheckLoopEntriesCanBeUsedForOsr(*graph_, CodeInfo(stack_map_region), code_item); } } diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h index b912672792..ea463eeb62 100644 --- a/compiler/optimizing/code_generator.h +++ b/compiler/optimizing/code_generator.h @@ -341,8 +341,10 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> { slow_paths_.push_back(std::unique_ptr<SlowPathCode>(slow_path)); } - void BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item); - size_t ComputeStackMapsSize(); + void BuildStackMaps(MemoryRegion stack_map_region, + MemoryRegion method_info_region, + const DexFile::CodeItem& code_item); + void ComputeStackMapAndMethodInfoSize(size_t* stack_map_size, size_t* method_info_size); size_t GetNumberOfJitRoots() const { return jit_string_roots_.size() + jit_class_roots_.size(); } diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index 97b61edbb9..28cc942dfb 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -2395,7 +2395,7 @@ void 
LocationsBuilderARM64::HandleShift(HBinaryOperation* instr) { case Primitive::kPrimLong: { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1))); - locations->SetOut(Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } default: @@ -2565,7 +2565,7 @@ void LocationsBuilderARM64::VisitIntermediateAddress(HIntermediateAddress* instr new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, ARM64EncodableConstantOrRegister(instruction->GetOffset(), instruction)); - locations->SetOut(Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); } void InstructionCodeGeneratorARM64::VisitIntermediateAddress(HIntermediateAddress* instruction) { diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc index f5ada5224b..d75779cef6 100644 --- a/compiler/optimizing/code_generator_arm_vixl.cc +++ b/compiler/optimizing/code_generator_arm_vixl.cc @@ -2000,15 +2000,10 @@ CodeGeneratorARMVIXL::CodeGeneratorARMVIXL(HGraph* graph, graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) { // Always save the LR register to mimic Quick. AddAllocatedRegister(Location::RegisterLocation(LR)); - // Give d14 and d15 as scratch registers to VIXL. - // They are removed from the register allocator in `SetupBlockedRegisters()`. - // TODO(VIXL): We need two scratch D registers for `EmitSwap` when swapping two double stack - // slots. If that is sufficiently rare, and we have pressure on FP registers, we could instead - // spill in `EmitSwap`. But if we actually are guaranteed to have 32 D registers, we could give - // d30 and d31 to VIXL to avoid removing registers from the allocator. 
If that is the case, we may - // also want to investigate giving those 14 other D registers to the allocator. - GetVIXLAssembler()->GetScratchVRegisterList()->Combine(d14); - GetVIXLAssembler()->GetScratchVRegisterList()->Combine(d15); + // Give D30 and D31 as scratch register to VIXL. The register allocator only works on + // S0-S31, which alias to D0-D15. + GetVIXLAssembler()->GetScratchVRegisterList()->Combine(d31); + GetVIXLAssembler()->GetScratchVRegisterList()->Combine(d30); } void JumpTableARMVIXL::EmitTable(CodeGeneratorARMVIXL* codegen) { @@ -2074,13 +2069,6 @@ void CodeGeneratorARMVIXL::SetupBlockedRegisters() const { // Reserve temp register. blocked_core_registers_[IP] = true; - // Registers s28-s31 (d14-d15) are left to VIXL for scratch registers. - // (They are given to the `MacroAssembler` in `CodeGeneratorARMVIXL::CodeGeneratorARMVIXL`.) - blocked_fpu_registers_[28] = true; - blocked_fpu_registers_[29] = true; - blocked_fpu_registers_[30] = true; - blocked_fpu_registers_[31] = true; - if (GetGraph()->IsDebuggable()) { // Stubs do not save callee-save floating point registers. If the graph // is debuggable, we need to deal with these registers differently. For @@ -6549,13 +6537,16 @@ void ParallelMoveResolverARMVIXL::Exchange(vixl32::Register reg, int mem) { void ParallelMoveResolverARMVIXL::Exchange(int mem1, int mem2) { // TODO(VIXL32): Double check the performance of this implementation. 
UseScratchRegisterScope temps(GetAssembler()->GetVIXLAssembler()); - vixl32::SRegister temp_1 = temps.AcquireS(); - vixl32::SRegister temp_2 = temps.AcquireS(); + vixl32::Register temp1 = temps.Acquire(); + ScratchRegisterScope ensure_scratch( + this, temp1.GetCode(), r0.GetCode(), codegen_->GetNumberOfCoreRegisters()); + vixl32::Register temp2(ensure_scratch.GetRegister()); - __ Vldr(temp_1, MemOperand(sp, mem1)); - __ Vldr(temp_2, MemOperand(sp, mem2)); - __ Vstr(temp_1, MemOperand(sp, mem2)); - __ Vstr(temp_2, MemOperand(sp, mem1)); + int stack_offset = ensure_scratch.IsSpilled() ? kArmWordSize : 0; + GetAssembler()->LoadFromOffset(kLoadWord, temp1, sp, mem1 + stack_offset); + GetAssembler()->LoadFromOffset(kLoadWord, temp2, sp, mem2 + stack_offset); + GetAssembler()->StoreToOffset(kStoreWord, temp1, sp, mem2 + stack_offset); + GetAssembler()->StoreToOffset(kStoreWord, temp2, sp, mem1 + stack_offset); } void ParallelMoveResolverARMVIXL::EmitSwap(size_t index) { @@ -6578,7 +6569,7 @@ void ParallelMoveResolverARMVIXL::EmitSwap(size_t index) { } else if (source.IsStackSlot() && destination.IsStackSlot()) { Exchange(source.GetStackIndex(), destination.GetStackIndex()); } else if (source.IsFpuRegister() && destination.IsFpuRegister()) { - vixl32::SRegister temp = temps.AcquireS(); + vixl32::Register temp = temps.Acquire(); __ Vmov(temp, SRegisterFrom(source)); __ Vmov(SRegisterFrom(source), SRegisterFrom(destination)); __ Vmov(SRegisterFrom(destination), temp); @@ -6637,12 +6628,12 @@ void ParallelMoveResolverARMVIXL::EmitSwap(size_t index) { } } -void ParallelMoveResolverARMVIXL::SpillScratch(int reg ATTRIBUTE_UNUSED) { - TODO_VIXL32(FATAL); +void ParallelMoveResolverARMVIXL::SpillScratch(int reg) { + __ Push(vixl32::Register(reg)); } -void ParallelMoveResolverARMVIXL::RestoreScratch(int reg ATTRIBUTE_UNUSED) { - TODO_VIXL32(FATAL); +void ParallelMoveResolverARMVIXL::RestoreScratch(int reg) { + __ Pop(vixl32::Register(reg)); } HLoadClass::LoadKind 
CodeGeneratorARMVIXL::GetSupportedLoadClassKind( diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc index 02c3ad6e39..5246dbc5cb 100644 --- a/compiler/optimizing/code_generator_mips64.cc +++ b/compiler/optimizing/code_generator_mips64.cc @@ -1681,6 +1681,25 @@ void InstructionCodeGeneratorMIPS64::VisitArrayLength(HArrayLength* instruction) } } +Location LocationsBuilderMIPS64::RegisterOrZeroConstant(HInstruction* instruction) { + return (instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern()) + ? Location::ConstantLocation(instruction->AsConstant()) + : Location::RequiresRegister(); +} + +Location LocationsBuilderMIPS64::FpuRegisterOrConstantForStore(HInstruction* instruction) { + // We can store 0.0 directly (from the ZERO register) without loading it into an FPU register. + // We can store a non-zero float or double constant without first loading it into the FPU, + // but we should only prefer this if the constant has a single use. + if (instruction->IsConstant() && + (instruction->AsConstant()->IsZeroBitPattern() || + instruction->GetUses().HasExactlyOneElement())) { + return Location::ConstantLocation(instruction->AsConstant()); + // Otherwise fall through and require an FPU register for the constant. 
+ } + return Location::RequiresFpuRegister(); +} + void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) { bool needs_runtime_call = instruction->NeedsTypeCheck(); LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary( @@ -1695,9 +1714,9 @@ void LocationsBuilderMIPS64::VisitArraySet(HArraySet* instruction) { locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); if (Primitive::IsFloatingPointType(instruction->InputAt(2)->GetType())) { - locations->SetInAt(2, Location::RequiresFpuRegister()); + locations->SetInAt(2, FpuRegisterOrConstantForStore(instruction->InputAt(2))); } else { - locations->SetInAt(2, Location::RequiresRegister()); + locations->SetInAt(2, RegisterOrZeroConstant(instruction->InputAt(2))); } } } @@ -1706,24 +1725,29 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { LocationSummary* locations = instruction->GetLocations(); GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>(); Location index = locations->InAt(1); + Location value_location = locations->InAt(2); Primitive::Type value_type = instruction->GetComponentType(); bool needs_runtime_call = locations->WillCall(); bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(value_type, instruction->GetValue()); auto null_checker = GetImplicitNullChecker(instruction, codegen_); + GpuRegister base_reg = index.IsConstant() ? 
obj : TMP; switch (value_type) { case Primitive::kPrimBoolean: case Primitive::kPrimByte: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value(); - GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>(); if (index.IsConstant()) { - size_t offset = - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset; - __ StoreToOffset(kStoreByte, value, obj, offset, null_checker); + data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1; } else { - __ Daddu(TMP, obj, index.AsRegister<GpuRegister>()); - __ StoreToOffset(kStoreByte, value, TMP, data_offset, null_checker); + __ Daddu(base_reg, obj, index.AsRegister<GpuRegister>()); + } + if (value_location.IsConstant()) { + int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant()); + __ StoreConstToOffset(kStoreByte, value, base_reg, data_offset, TMP, null_checker); + } else { + GpuRegister value = value_location.AsRegister<GpuRegister>(); + __ StoreToOffset(kStoreByte, value, base_reg, data_offset, null_checker); } break; } @@ -1731,15 +1755,18 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimShort: case Primitive::kPrimChar: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value(); - GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>(); if (index.IsConstant()) { - size_t offset = - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset; - __ StoreToOffset(kStoreHalfword, value, obj, offset, null_checker); + data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2; } else { - __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_2); - __ Daddu(TMP, obj, TMP); - __ StoreToOffset(kStoreHalfword, value, TMP, data_offset, null_checker); + __ Dsll(base_reg, index.AsRegister<GpuRegister>(), TIMES_2); + __ Daddu(base_reg, obj, base_reg); + } + if (value_location.IsConstant()) { + int32_t value = 
CodeGenerator::GetInt32ValueOf(value_location.GetConstant()); + __ StoreConstToOffset(kStoreHalfword, value, base_reg, data_offset, TMP, null_checker); + } else { + GpuRegister value = value_location.AsRegister<GpuRegister>(); + __ StoreToOffset(kStoreHalfword, value, base_reg, data_offset, null_checker); } break; } @@ -1748,54 +1775,57 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimNot: { if (!needs_runtime_call) { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); - GpuRegister base_reg; - GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>(); if (index.IsConstant()) { data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4; - base_reg = obj; } else { DCHECK(index.IsRegister()) << index; - __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4); - __ Daddu(TMP, obj, TMP); - base_reg = TMP; + __ Dsll(base_reg, index.AsRegister<GpuRegister>(), TIMES_4); + __ Daddu(base_reg, obj, base_reg); } - if (kPoisonHeapReferences && needs_write_barrier) { - // Note that in the case where `value` is a null reference, - // we do not enter this block, as a null reference does not - // need poisoning. - DCHECK_EQ(value_type, Primitive::kPrimNot); - // Use Sw() instead of StoreToOffset() in order to be able to - // hold the poisoned reference in AT and thus avoid allocating - // yet another temporary register. - if (index.IsConstant()) { - if (!IsInt<16>(static_cast<int32_t>(data_offset))) { - int16_t low16 = Low16Bits(data_offset); - // For consistency with StoreToOffset() and such treat data_offset as int32_t. - uint64_t high48 = static_cast<uint64_t>(static_cast<int32_t>(data_offset)) - low16; - int16_t upper16 = High16Bits(high48); - // Allow the full [-2GB,+2GB) range in case `low16` is negative and needs a - // compensatory 64KB added, which may push `high48` above 2GB and require - // the dahi instruction. 
- int16_t higher16 = High32Bits(high48) + ((upper16 < 0) ? 1 : 0); - __ Daui(TMP, obj, upper16); - if (higher16 != 0) { - __ Dahi(TMP, higher16); + if (value_location.IsConstant()) { + int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant()); + __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker); + DCHECK(!needs_write_barrier); + } else { + GpuRegister value = value_location.AsRegister<GpuRegister>(); + if (kPoisonHeapReferences && needs_write_barrier) { + // Note that in the case where `value` is a null reference, + // we do not enter this block, as a null reference does not + // need poisoning. + DCHECK_EQ(value_type, Primitive::kPrimNot); + // Use Sw() instead of StoreToOffset() in order to be able to + // hold the poisoned reference in AT and thus avoid allocating + // yet another temporary register. + if (index.IsConstant()) { + if (!IsInt<16>(static_cast<int32_t>(data_offset))) { + int16_t low16 = Low16Bits(data_offset); + // For consistency with StoreToOffset() and such treat data_offset as int32_t. + uint64_t high48 = static_cast<uint64_t>(static_cast<int32_t>(data_offset)) - low16; + int16_t upper16 = High16Bits(high48); + // Allow the full [-2GB,+2GB) range in case `low16` is negative and needs a + // compensatory 64KB added, which may push `high48` above 2GB and require + // the dahi instruction. + int16_t higher16 = High32Bits(high48) + ((upper16 < 0) ? 
1 : 0); + __ Daui(TMP, obj, upper16); + if (higher16 != 0) { + __ Dahi(TMP, higher16); + } + base_reg = TMP; + data_offset = low16; } - base_reg = TMP; - data_offset = low16; + } else { + DCHECK(IsInt<16>(static_cast<int32_t>(data_offset))); } + __ PoisonHeapReference(AT, value); + __ Sw(AT, base_reg, data_offset); + null_checker(); } else { - DCHECK(IsInt<16>(static_cast<int32_t>(data_offset))); + __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker); + } + if (needs_write_barrier) { + DCHECK_EQ(value_type, Primitive::kPrimNot); + codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull()); } - __ PoisonHeapReference(AT, value); - __ Sw(AT, base_reg, data_offset); - null_checker(); - } else { - __ StoreToOffset(kStoreWord, value, base_reg, data_offset, null_checker); - } - if (needs_write_barrier) { - DCHECK_EQ(value_type, Primitive::kPrimNot); - codegen_->MarkGCCard(obj, value, instruction->GetValueCanBeNull()); } } else { DCHECK_EQ(value_type, Primitive::kPrimNot); @@ -1809,47 +1839,54 @@ void InstructionCodeGeneratorMIPS64::VisitArraySet(HArraySet* instruction) { case Primitive::kPrimLong: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value(); - GpuRegister value = locations->InAt(2).AsRegister<GpuRegister>(); if (index.IsConstant()) { - size_t offset = - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset; - __ StoreToOffset(kStoreDoubleword, value, obj, offset, null_checker); + data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8; } else { - __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8); - __ Daddu(TMP, obj, TMP); - __ StoreToOffset(kStoreDoubleword, value, TMP, data_offset, null_checker); + __ Dsll(base_reg, index.AsRegister<GpuRegister>(), TIMES_8); + __ Daddu(base_reg, obj, base_reg); + } + if (value_location.IsConstant()) { + int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant()); + __ StoreConstToOffset(kStoreDoubleword, 
value, base_reg, data_offset, TMP, null_checker); + } else { + GpuRegister value = value_location.AsRegister<GpuRegister>(); + __ StoreToOffset(kStoreDoubleword, value, base_reg, data_offset, null_checker); } break; } case Primitive::kPrimFloat: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value(); - FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>(); - DCHECK(locations->InAt(2).IsFpuRegister()); if (index.IsConstant()) { - size_t offset = - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; - __ StoreFpuToOffset(kStoreWord, value, obj, offset, null_checker); + data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4; } else { - __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_4); - __ Daddu(TMP, obj, TMP); - __ StoreFpuToOffset(kStoreWord, value, TMP, data_offset, null_checker); + __ Dsll(base_reg, index.AsRegister<GpuRegister>(), TIMES_4); + __ Daddu(base_reg, obj, base_reg); + } + if (value_location.IsConstant()) { + int32_t value = CodeGenerator::GetInt32ValueOf(value_location.GetConstant()); + __ StoreConstToOffset(kStoreWord, value, base_reg, data_offset, TMP, null_checker); + } else { + FpuRegister value = value_location.AsFpuRegister<FpuRegister>(); + __ StoreFpuToOffset(kStoreWord, value, base_reg, data_offset, null_checker); } break; } case Primitive::kPrimDouble: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value(); - FpuRegister value = locations->InAt(2).AsFpuRegister<FpuRegister>(); - DCHECK(locations->InAt(2).IsFpuRegister()); if (index.IsConstant()) { - size_t offset = - (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset; - __ StoreFpuToOffset(kStoreDoubleword, value, obj, offset, null_checker); + data_offset += index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8; } else { - __ Dsll(TMP, index.AsRegister<GpuRegister>(), TIMES_8); - __ Daddu(TMP, obj, TMP); - __ StoreFpuToOffset(kStoreDoubleword, 
value, TMP, data_offset, null_checker); + __ Dsll(base_reg, index.AsRegister<GpuRegister>(), TIMES_8); + __ Daddu(base_reg, obj, base_reg); + } + if (value_location.IsConstant()) { + int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant()); + __ StoreConstToOffset(kStoreDoubleword, value, base_reg, data_offset, TMP, null_checker); + } else { + FpuRegister value = value_location.AsFpuRegister<FpuRegister>(); + __ StoreFpuToOffset(kStoreDoubleword, value, base_reg, data_offset, null_checker); } break; } @@ -3326,9 +3363,9 @@ void LocationsBuilderMIPS64::HandleFieldSet(HInstruction* instruction, new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); locations->SetInAt(0, Location::RequiresRegister()); if (Primitive::IsFloatingPointType(instruction->InputAt(1)->GetType())) { - locations->SetInAt(1, Location::RequiresFpuRegister()); + locations->SetInAt(1, FpuRegisterOrConstantForStore(instruction->InputAt(1))); } else { - locations->SetInAt(1, Location::RequiresRegister()); + locations->SetInAt(1, RegisterOrZeroConstant(instruction->InputAt(1))); } } @@ -3338,6 +3375,7 @@ void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction, Primitive::Type type = field_info.GetFieldType(); LocationSummary* locations = instruction->GetLocations(); GpuRegister obj = locations->InAt(0).AsRegister<GpuRegister>(); + Location value_location = locations->InAt(1); StoreOperandType store_type = kStoreByte; uint32_t offset = field_info.GetFieldOffset().Uint32Value(); bool needs_write_barrier = CodeGenerator::StoreNeedsWriteBarrier(type, instruction->InputAt(1)); @@ -3365,29 +3403,34 @@ void InstructionCodeGeneratorMIPS64::HandleFieldSet(HInstruction* instruction, LOG(FATAL) << "Unreachable type " << type; UNREACHABLE(); } - if (!Primitive::IsFloatingPointType(type)) { - DCHECK(locations->InAt(1).IsRegister()); - GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>(); - if (kPoisonHeapReferences && 
needs_write_barrier) { - // Note that in the case where `value` is a null reference, - // we do not enter this block, as a null reference does not - // need poisoning. - DCHECK_EQ(type, Primitive::kPrimNot); - __ PoisonHeapReference(TMP, src); - __ StoreToOffset(store_type, TMP, obj, offset, null_checker); + + if (value_location.IsConstant()) { + int64_t value = CodeGenerator::GetInt64ValueOf(value_location.GetConstant()); + __ StoreConstToOffset(store_type, value, obj, offset, TMP, null_checker); + } else { + if (!Primitive::IsFloatingPointType(type)) { + DCHECK(value_location.IsRegister()); + GpuRegister src = value_location.AsRegister<GpuRegister>(); + if (kPoisonHeapReferences && needs_write_barrier) { + // Note that in the case where `value` is a null reference, + // we do not enter this block, as a null reference does not + // need poisoning. + DCHECK_EQ(type, Primitive::kPrimNot); + __ PoisonHeapReference(TMP, src); + __ StoreToOffset(store_type, TMP, obj, offset, null_checker); + } else { + __ StoreToOffset(store_type, src, obj, offset, null_checker); + } } else { - __ StoreToOffset(store_type, src, obj, offset, null_checker); + DCHECK(value_location.IsFpuRegister()); + FpuRegister src = value_location.AsFpuRegister<FpuRegister>(); + __ StoreFpuToOffset(store_type, src, obj, offset, null_checker); } - } else { - DCHECK(locations->InAt(1).IsFpuRegister()); - FpuRegister src = locations->InAt(1).AsFpuRegister<FpuRegister>(); - __ StoreFpuToOffset(store_type, src, obj, offset, null_checker); } - // TODO: memory barriers? 
if (needs_write_barrier) { - DCHECK(locations->InAt(1).IsRegister()); - GpuRegister src = locations->InAt(1).AsRegister<GpuRegister>(); + DCHECK(value_location.IsRegister()); + GpuRegister src = value_location.AsRegister<GpuRegister>(); codegen_->MarkGCCard(obj, src, value_can_be_null); } } @@ -5067,12 +5110,34 @@ void InstructionCodeGeneratorMIPS64::VisitPackedSwitch(HPackedSwitch* switch_ins } } -void LocationsBuilderMIPS64::VisitClassTableGet(HClassTableGet*) { - UNIMPLEMENTED(FATAL) << "ClassTableGet is unimplemented on mips64"; +void LocationsBuilderMIPS64::VisitClassTableGet(HClassTableGet* instruction) { + LocationSummary* locations = + new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetOut(Location::RequiresRegister()); } -void InstructionCodeGeneratorMIPS64::VisitClassTableGet(HClassTableGet*) { - UNIMPLEMENTED(FATAL) << "ClassTableGet is unimplemented on mips64"; +void InstructionCodeGeneratorMIPS64::VisitClassTableGet(HClassTableGet* instruction) { + LocationSummary* locations = instruction->GetLocations(); + if (instruction->GetTableKind() == HClassTableGet::TableKind::kVTable) { + uint32_t method_offset = mirror::Class::EmbeddedVTableEntryOffset( + instruction->GetIndex(), kMips64PointerSize).SizeValue(); + __ LoadFromOffset(kLoadDoubleword, + locations->Out().AsRegister<GpuRegister>(), + locations->InAt(0).AsRegister<GpuRegister>(), + method_offset); + } else { + uint32_t method_offset = static_cast<uint32_t>(ImTable::OffsetOfElement( + instruction->GetIndex(), kMips64PointerSize)); + __ LoadFromOffset(kLoadDoubleword, + locations->Out().AsRegister<GpuRegister>(), + locations->InAt(0).AsRegister<GpuRegister>(), + mirror::Class::ImtPtrOffset(kMips64PointerSize).Uint32Value()); + __ LoadFromOffset(kLoadDoubleword, + locations->Out().AsRegister<GpuRegister>(), + locations->Out().AsRegister<GpuRegister>(), + method_offset); + } } } // namespace mips64 
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h index 3056f7f464..6040dc9492 100644 --- a/compiler/optimizing/code_generator_mips64.h +++ b/compiler/optimizing/code_generator_mips64.h @@ -189,6 +189,8 @@ class LocationsBuilderMIPS64 : public HGraphVisitor { void HandleShift(HBinaryOperation* operation); void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info); void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info); + Location RegisterOrZeroConstant(HInstruction* instruction); + Location FpuRegisterOrConstantForStore(HInstruction* instruction); InvokeDexCallingConventionVisitorMIPS64 parameter_visitor_; diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 644fceebe4..08f1adfcff 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -983,7 +983,7 @@ Location CodeGeneratorX86_64::GenerateCalleeMethodStaticOrDirectCall(HInvokeStat callee_method = invoke->GetLocations()->InAt(invoke->GetSpecialInputIndex()); break; case HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress: - __ movq(temp.AsRegister<CpuRegister>(), Immediate(invoke->GetMethodAddress())); + Load64BitValue(temp.AsRegister<CpuRegister>(), invoke->GetMethodAddress()); break; case HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative: { __ movq(temp.AsRegister<CpuRegister>(), @@ -5531,7 +5531,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S uint32_t address = dchecked_integral_cast<uint32_t>( reinterpret_cast<uintptr_t>(cls->GetClass().Get())); DCHECK_NE(address, 0u); - __ movl(out, Immediate(address)); // Zero-extended. + __ movl(out, Immediate(static_cast<int32_t>(address))); // Zero-extended. 
break; } case HLoadClass::LoadKind::kBssEntry: { @@ -5666,7 +5666,7 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) NO_THREA uint32_t address = dchecked_integral_cast<uint32_t>( reinterpret_cast<uintptr_t>(load->GetString().Get())); DCHECK_NE(address, 0u); - __ movl(out, Immediate(address)); // Zero-extended. + __ movl(out, Immediate(static_cast<int32_t>(address))); // Zero-extended. return; // No dex cache slow path. } case HLoadString::LoadKind::kBssEntry: { diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 583008bbe8..62f5114e59 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -55,6 +55,9 @@ static constexpr size_t kMaximumNumberOfCumulatedDexRegisters = 64; // Avoid inlining within a huge method due to memory pressure. static constexpr size_t kMaximumCodeUnitSize = 4096; +// Controls the use of inline caches in AOT mode. +static constexpr bool kUseAOTInlineCaches = false; + void HInliner::Run() { const CompilerOptions& compiler_options = compiler_driver_->GetCompilerOptions(); if ((compiler_options.GetInlineDepthLimit() == 0) @@ -376,6 +379,10 @@ bool HInliner::TryInlineFromInlineCache(const DexFile& caller_dex_file, HInvoke* invoke_instruction, ArtMethod* resolved_method) REQUIRES_SHARED(Locks::mutator_lock_) { + if (Runtime::Current()->IsAotCompiler() && !kUseAOTInlineCaches) { + return false; + } + StackHandleScope<1> hs(Thread::Current()); Handle<mirror::ObjectArray<mirror::Class>> inline_cache; InlineCacheType inline_cache_type = Runtime::Current()->IsAotCompiler() @@ -892,10 +899,6 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget( return false; } - if (graph_->GetInstructionSet() == kMips64) { - // TODO: Support HClassTableGet for mips64. 
- return false; - } ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker(); PointerSize pointer_size = class_linker->GetImagePointerSize(); diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc index 28095c4d3f..98b80f5d3c 100644 --- a/compiler/optimizing/intrinsics_arm.cc +++ b/compiler/optimizing/intrinsics_arm.cc @@ -41,6 +41,54 @@ ArenaAllocator* IntrinsicCodeGeneratorARM::GetAllocator() { using IntrinsicSlowPathARM = IntrinsicSlowPath<InvokeDexCallingConventionVisitorARM>; +#define __ assembler-> + +// Compute base address for the System.arraycopy intrinsic in `base`. +static void GenSystemArrayCopyBaseAddress(ArmAssembler* assembler, + Primitive::Type type, + const Register& array, + const Location& pos, + const Register& base) { + // This routine is only used by the SystemArrayCopy intrinsic at the + // moment. We can allow Primitive::kPrimNot as `type` to implement + // the SystemArrayCopyChar intrinsic. + DCHECK_EQ(type, Primitive::kPrimNot); + const int32_t element_size = Primitive::ComponentSize(type); + const uint32_t element_size_shift = Primitive::ComponentSizeShift(type); + const uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value(); + + if (pos.IsConstant()) { + int32_t constant = pos.GetConstant()->AsIntConstant()->GetValue(); + __ AddConstant(base, array, element_size * constant + data_offset); + } else { + __ add(base, array, ShifterOperand(pos.AsRegister<Register>(), LSL, element_size_shift)); + __ AddConstant(base, data_offset); + } +} + +// Compute end address for the System.arraycopy intrinsic in `end`. +static void GenSystemArrayCopyEndAddress(ArmAssembler* assembler, + Primitive::Type type, + const Location& copy_length, + const Register& base, + const Register& end) { + // This routine is only used by the SystemArrayCopy intrinsic at the + // moment. We can allow Primitive::kPrimNot as `type` to implement + // the SystemArrayCopyChar intrinsic. 
+ DCHECK_EQ(type, Primitive::kPrimNot); + const int32_t element_size = Primitive::ComponentSize(type); + const uint32_t element_size_shift = Primitive::ComponentSizeShift(type); + + if (copy_length.IsConstant()) { + int32_t constant = copy_length.GetConstant()->AsIntConstant()->GetValue(); + __ AddConstant(end, base, element_size * constant); + } else { + __ add(end, base, ShifterOperand(copy_length.AsRegister<Register>(), LSL, element_size_shift)); + } +} + +#undef __ + // NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy. #define __ down_cast<ArmAssembler*>(codegen->GetAssembler())-> // NOLINT @@ -55,6 +103,7 @@ class ReadBarrierSystemArrayCopySlowPathARM : public SlowPathCode { void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen); + ArmAssembler* assembler = arm_codegen->GetAssembler(); LocationSummary* locations = instruction_->GetLocations(); DCHECK(locations->CanCall()); DCHECK(instruction_->IsInvokeStaticOrDirect()) @@ -63,9 +112,8 @@ class ReadBarrierSystemArrayCopySlowPathARM : public SlowPathCode { DCHECK(instruction_->GetLocations()->Intrinsified()); DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kSystemArrayCopy); - int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot); - uint32_t element_size_shift = Primitive::ComponentSizeShift(Primitive::kPrimNot); - uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value(); + Primitive::Type type = Primitive::kPrimNot; + const int32_t element_size = Primitive::ComponentSize(type); Register dest = locations->InAt(2).AsRegister<Register>(); Location dest_pos = locations->InAt(3); @@ -76,15 +124,7 @@ class ReadBarrierSystemArrayCopySlowPathARM : public SlowPathCode { __ Bind(GetEntryLabel()); // Compute the base destination address in `dst_curr_addr`. 
- if (dest_pos.IsConstant()) { - int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue(); - __ AddConstant(dst_curr_addr, dest, element_size * constant + offset); - } else { - __ add(dst_curr_addr, - dest, - ShifterOperand(dest_pos.AsRegister<Register>(), LSL, element_size_shift)); - __ AddConstant(dst_curr_addr, offset); - } + GenSystemArrayCopyBaseAddress(assembler, type, dest, dest_pos, dst_curr_addr); Label loop; __ Bind(&loop); @@ -108,6 +148,8 @@ class ReadBarrierSystemArrayCopySlowPathARM : public SlowPathCode { DCHECK_NE(src_stop_addr, IP); DCHECK_NE(tmp, IP); DCHECK(0 <= tmp && tmp < kNumberOfCoreRegisters) << tmp; + // TODO: Load the entrypoint once before the loop, instead of + // loading it at every iteration. int32_t entry_point_offset = CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(tmp); // This runtime call does not require a stack map. @@ -1925,138 +1967,113 @@ void IntrinsicCodeGeneratorARM::VisitSystemArrayCopy(HInvoke* invoke) { __ CompareAndBranchIfNonZero(temp3, intrinsic_slow_path->GetEntryLabel()); } - int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot); - uint32_t element_size_shift = Primitive::ComponentSizeShift(Primitive::kPrimNot); - uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value(); - - // Compute the base source address in `temp1`. - if (src_pos.IsConstant()) { - int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue(); - __ AddConstant(temp1, src, element_size * constant + offset); - } else { - __ add(temp1, src, ShifterOperand(src_pos.AsRegister<Register>(), LSL, element_size_shift)); - __ AddConstant(temp1, offset); - } - - // Compute the end source address in `temp3`. 
- if (length.IsConstant()) { - int32_t constant = length.GetConstant()->AsIntConstant()->GetValue(); - __ AddConstant(temp3, temp1, element_size * constant); + if (length.IsConstant() && length.GetConstant()->AsIntConstant()->GetValue() == 0) { + // Null constant length: no need to emit the loop code at all. } else { - __ add(temp3, temp1, ShifterOperand(length.AsRegister<Register>(), LSL, element_size_shift)); - } + Label done; + const Primitive::Type type = Primitive::kPrimNot; + const int32_t element_size = Primitive::ComponentSize(type); - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { - // TODO: Also convert this intrinsic to the IsGcMarking strategy? - - // The base destination address is computed later, as `temp2` is - // used for intermediate computations. - - // SystemArrayCopy implementation for Baker read barriers (see - // also CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier): - // - // if (src_ptr != end_ptr) { - // uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState(); - // lfence; // Load fence or artificial data dependency to prevent load-load reordering - // bool is_gray = (rb_state == ReadBarrier::GrayState()); - // if (is_gray) { - // // Slow-path copy. - // do { - // *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++))); - // } while (src_ptr != end_ptr) - // } else { - // // Fast-path copy. - // do { - // *dest_ptr++ = *src_ptr++; - // } while (src_ptr != end_ptr) - // } - // } - - Label loop, done; - - // Don't enter copy loop if `length == 0`. 
- __ cmp(temp1, ShifterOperand(temp3)); - __ b(&done, EQ); - - // /* int32_t */ monitor = src->monitor_ - __ LoadFromOffset(kLoadWord, temp2, src, monitor_offset); - // /* LockWord */ lock_word = LockWord(monitor) - static_assert(sizeof(LockWord) == sizeof(int32_t), - "art::LockWord and int32_t have different sizes."); - - // Introduce a dependency on the lock_word including the rb_state, - // which shall prevent load-load reordering without using - // a memory barrier (which would be more expensive). - // `src` is unchanged by this operation, but its value now depends - // on `temp2`. - __ add(src, src, ShifterOperand(temp2, LSR, 32)); - - // Slow path used to copy array when `src` is gray. - SlowPathCode* read_barrier_slow_path = - new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARM(invoke); - codegen_->AddSlowPath(read_barrier_slow_path); - - // Given the numeric representation, it's enough to check the low bit of the - // rb_state. We do that by shifting the bit out of the lock word with LSRS - // which can be a 16-bit instruction unlike the TST immediate. - static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0"); - static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1"); - __ Lsrs(temp2, temp2, LockWord::kReadBarrierStateShift + 1); - // Carry flag is the last bit shifted out by LSRS. - __ b(read_barrier_slow_path->GetEntryLabel(), CS); - - // Fast-path copy. - - // Compute the base destination address in `temp2`. - if (dest_pos.IsConstant()) { - int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue(); - __ AddConstant(temp2, dest, element_size * constant + offset); - } else { - __ add(temp2, dest, ShifterOperand(dest_pos.AsRegister<Register>(), LSL, element_size_shift)); - __ AddConstant(temp2, offset); + if (length.IsRegister()) { + // Don't enter the copy loop if the length is null. 
+ __ CompareAndBranchIfZero(length.AsRegister<Register>(), &done); } - // Iterate over the arrays and do a raw copy of the objects. We don't need to - // poison/unpoison. - __ Bind(&loop); - __ ldr(IP, Address(temp1, element_size, Address::PostIndex)); - __ str(IP, Address(temp2, element_size, Address::PostIndex)); - __ cmp(temp1, ShifterOperand(temp3)); - __ b(&loop, NE); - - __ Bind(read_barrier_slow_path->GetExitLabel()); - __ Bind(&done); - } else { - // Non read barrier code. - - // Compute the base destination address in `temp2`. - if (dest_pos.IsConstant()) { - int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue(); - __ AddConstant(temp2, dest, element_size * constant + offset); + if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + // TODO: Also convert this intrinsic to the IsGcMarking strategy? + + // SystemArrayCopy implementation for Baker read barriers (see + // also CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier): + // + // uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState(); + // lfence; // Load fence or artificial data dependency to prevent load-load reordering + // bool is_gray = (rb_state == ReadBarrier::GrayState()); + // if (is_gray) { + // // Slow-path copy. + // do { + // *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++))); + // } while (src_ptr != end_ptr) + // } else { + // // Fast-path copy. + // do { + // *dest_ptr++ = *src_ptr++; + // } while (src_ptr != end_ptr) + // } + + // /* int32_t */ monitor = src->monitor_ + __ LoadFromOffset(kLoadWord, temp2, src, monitor_offset); + // /* LockWord */ lock_word = LockWord(monitor) + static_assert(sizeof(LockWord) == sizeof(int32_t), + "art::LockWord and int32_t have different sizes."); + + // Introduce a dependency on the lock_word including the rb_state, + // which shall prevent load-load reordering without using + // a memory barrier (which would be more expensive). 
+ // `src` is unchanged by this operation, but its value now depends + // on `temp2`. + __ add(src, src, ShifterOperand(temp2, LSR, 32)); + + // Compute the base source address in `temp1`. + // Note that `temp1` (the base source address) is computed from + // `src` (and `src_pos`) here, and thus honors the artificial + // dependency of `src` on `temp2`. + GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1); + // Compute the end source address in `temp3`. + GenSystemArrayCopyEndAddress(GetAssembler(), type, length, temp1, temp3); + // The base destination address is computed later, as `temp2` is + // used for intermediate computations. + + // Slow path used to copy array when `src` is gray. + // Note that the base destination address is computed in `temp2` + // by the slow path code. + SlowPathCode* read_barrier_slow_path = + new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARM(invoke); + codegen_->AddSlowPath(read_barrier_slow_path); + + // Given the numeric representation, it's enough to check the low bit of the + // rb_state. We do that by shifting the bit out of the lock word with LSRS + // which can be a 16-bit instruction unlike the TST immediate. + static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0"); + static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1"); + __ Lsrs(temp2, temp2, LockWord::kReadBarrierStateShift + 1); + // Carry flag is the last bit shifted out by LSRS. + __ b(read_barrier_slow_path->GetEntryLabel(), CS); + + // Fast-path copy. + // Compute the base destination address in `temp2`. + GenSystemArrayCopyBaseAddress(GetAssembler(), type, dest, dest_pos, temp2); + // Iterate over the arrays and do a raw copy of the objects. We don't need to + // poison/unpoison. 
+ Label loop; + __ Bind(&loop); + __ ldr(IP, Address(temp1, element_size, Address::PostIndex)); + __ str(IP, Address(temp2, element_size, Address::PostIndex)); + __ cmp(temp1, ShifterOperand(temp3)); + __ b(&loop, NE); + + __ Bind(read_barrier_slow_path->GetExitLabel()); } else { - __ add(temp2, dest, ShifterOperand(dest_pos.AsRegister<Register>(), LSL, element_size_shift)); - __ AddConstant(temp2, offset); + // Non read barrier code. + // Compute the base source address in `temp1`. + GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1); + // Compute the base destination address in `temp2`. + GenSystemArrayCopyBaseAddress(GetAssembler(), type, dest, dest_pos, temp2); + // Compute the end source address in `temp3`. + GenSystemArrayCopyEndAddress(GetAssembler(), type, length, temp1, temp3); + // Iterate over the arrays and do a raw copy of the objects. We don't need to + // poison/unpoison. + Label loop; + __ Bind(&loop); + __ ldr(IP, Address(temp1, element_size, Address::PostIndex)); + __ str(IP, Address(temp2, element_size, Address::PostIndex)); + __ cmp(temp1, ShifterOperand(temp3)); + __ b(&loop, NE); } - - // Iterate over the arrays and do a raw copy of the objects. We don't need to - // poison/unpoison. - Label loop, done; - __ cmp(temp1, ShifterOperand(temp3)); - __ b(&done, EQ); - __ Bind(&loop); - __ ldr(IP, Address(temp1, element_size, Address::PostIndex)); - __ str(IP, Address(temp2, element_size, Address::PostIndex)); - __ cmp(temp1, ShifterOperand(temp3)); - __ b(&loop, NE); __ Bind(&done); } // We only need one card marking on the destination array. 
- codegen_->MarkGCCard(temp1, - temp2, - dest, - Register(kNoRegister), - /* value_can_be_null */ false); + codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null */ false); __ Bind(intrinsic_slow_path->GetExitLabel()); } diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 934ba1b9fb..423fd3c6ae 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -198,6 +198,8 @@ class ReadBarrierSystemArrayCopySlowPathARM64 : public SlowPathCodeARM64 { DCHECK_NE(LocationFrom(src_stop_addr).reg(), IP0); DCHECK_NE(tmp_.reg(), IP0); DCHECK(0 <= tmp_.reg() && tmp_.reg() < kNumberOfWRegisters) << tmp_.reg(); + // TODO: Load the entrypoint once before the loop, instead of + // loading it at every iteration. int32_t entry_point_offset = CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArm64PointerSize>(tmp_.reg()); // This runtime call does not require a stack map. @@ -1560,7 +1562,10 @@ void IntrinsicCodeGeneratorARM64::VisitStringEquals(HInvoke* invoke) { // Load `count` field of the argument string and check if it matches the const string. // Also compares the compression style, if differs return false. __ Ldr(temp, MemOperand(arg.X(), count_offset)); + // Temporarily release temp1 as we may not be able to embed the flagged count in CMP immediate. + scratch_scope.Release(temp1); __ Cmp(temp, Operand(mirror::String::GetFlaggedCount(const_string_length, is_compressed))); + temp1 = scratch_scope.AcquireW(); __ B(&return_false, ne); } else { // Load `count` fields of this and argument strings. @@ -2188,8 +2193,9 @@ static void CheckSystemArrayCopyPosition(MacroAssembler* masm, } } -// Compute base source address, base destination address, and end source address -// for System.arraycopy* intrinsics. 
+// Compute base source address, base destination address, and end +// source address for System.arraycopy* intrinsics in `src_base`, +// `dst_base` and `src_end` respectively. static void GenSystemArrayCopyAddresses(MacroAssembler* masm, Primitive::Type type, const Register& src, @@ -2200,12 +2206,13 @@ static void GenSystemArrayCopyAddresses(MacroAssembler* masm, const Register& src_base, const Register& dst_base, const Register& src_end) { + // This routine is used by the SystemArrayCopy and the SystemArrayCopyChar intrinsics. DCHECK(type == Primitive::kPrimNot || type == Primitive::kPrimChar) << "Unexpected element type: " << type; const int32_t element_size = Primitive::ComponentSize(type); const int32_t element_size_shift = Primitive::ComponentSizeShift(type); + const uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value(); - uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value(); if (src_pos.IsConstant()) { int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue(); __ Add(src_base, src, element_size * constant + data_offset); @@ -2709,111 +2716,131 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { __ Cbnz(temp2, intrinsic_slow_path->GetEntryLabel()); } - Register src_curr_addr = temp1.X(); - Register dst_curr_addr = temp2.X(); - Register src_stop_addr = temp3.X(); - - GenSystemArrayCopyAddresses(masm, - Primitive::kPrimNot, - src, - src_pos, - dest, - dest_pos, - length, - src_curr_addr, - dst_curr_addr, - src_stop_addr); - - const int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot); - - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { - // TODO: Also convert this intrinsic to the IsGcMarking strategy? 
- - // SystemArrayCopy implementation for Baker read barriers (see - // also CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier): - // - // if (src_ptr != end_ptr) { - // uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState(); - // lfence; // Load fence or artificial data dependency to prevent load-load reordering - // bool is_gray = (rb_state == ReadBarrier::GrayState()); - // if (is_gray) { - // // Slow-path copy. - // do { - // *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++))); - // } while (src_ptr != end_ptr) - // } else { - // // Fast-path copy. - // do { - // *dest_ptr++ = *src_ptr++; - // } while (src_ptr != end_ptr) - // } - // } - - vixl::aarch64::Label loop, done; - - // Don't enter copy loop if `length == 0`. - __ Cmp(src_curr_addr, src_stop_addr); - __ B(&done, eq); - - // Make sure `tmp` is not IP0, as it is clobbered by - // ReadBarrierMarkRegX entry points in - // ReadBarrierSystemArrayCopySlowPathARM64. - temps.Exclude(ip0); - Register tmp = temps.AcquireW(); - DCHECK_NE(LocationFrom(tmp).reg(), IP0); - - // /* int32_t */ monitor = src->monitor_ - __ Ldr(tmp, HeapOperand(src.W(), monitor_offset)); - // /* LockWord */ lock_word = LockWord(monitor) - static_assert(sizeof(LockWord) == sizeof(int32_t), - "art::LockWord and int32_t have different sizes."); - - // Introduce a dependency on the lock_word including rb_state, - // to prevent load-load reordering, and without using - // a memory barrier (which would be more expensive). - // `src` is unchanged by this operation, but its value now depends - // on `tmp`. - __ Add(src.X(), src.X(), Operand(tmp.X(), LSR, 32)); - - // Slow path used to copy array when `src` is gray. - SlowPathCodeARM64* read_barrier_slow_path = - new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARM64(invoke, LocationFrom(tmp)); - codegen_->AddSlowPath(read_barrier_slow_path); - - // Given the numeric representation, it's enough to check the low bit of the rb_state. 
- static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0"); - static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1"); - __ Tbnz(tmp, LockWord::kReadBarrierStateShift, read_barrier_slow_path->GetEntryLabel()); - - // Fast-path copy. - // Iterate over the arrays and do a raw copy of the objects. We don't need to - // poison/unpoison. - __ Bind(&loop); - __ Ldr(tmp, MemOperand(src_curr_addr, element_size, PostIndex)); - __ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex)); - __ Cmp(src_curr_addr, src_stop_addr); - __ B(&loop, ne); - - __ Bind(read_barrier_slow_path->GetExitLabel()); - __ Bind(&done); + if (length.IsConstant() && length.GetConstant()->AsIntConstant()->GetValue() == 0) { + // Null constant length: no need to emit the loop code at all. } else { - // Non read barrier code. - - // Iterate over the arrays and do a raw copy of the objects. We don't need to - // poison/unpoison. - vixl::aarch64::Label loop, done; - __ Bind(&loop); - __ Cmp(src_curr_addr, src_stop_addr); - __ B(&done, eq); - { + Register src_curr_addr = temp1.X(); + Register dst_curr_addr = temp2.X(); + Register src_stop_addr = temp3.X(); + vixl::aarch64::Label done; + const Primitive::Type type = Primitive::kPrimNot; + const int32_t element_size = Primitive::ComponentSize(type); + + if (length.IsRegister()) { + // Don't enter the copy loop if the length is null. + __ Cbz(WRegisterFrom(length), &done); + } + + if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + // TODO: Also convert this intrinsic to the IsGcMarking strategy? + + // SystemArrayCopy implementation for Baker read barriers (see + // also CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier): + // + // uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState(); + // lfence; // Load fence or artificial data dependency to prevent load-load reordering + // bool is_gray = (rb_state == ReadBarrier::GrayState()); + // if (is_gray) { + // // Slow-path copy. 
+ // do { + // *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++))); + // } while (src_ptr != end_ptr) + // } else { + // // Fast-path copy. + // do { + // *dest_ptr++ = *src_ptr++; + // } while (src_ptr != end_ptr) + // } + + // Make sure `tmp` is not IP0, as it is clobbered by + // ReadBarrierMarkRegX entry points in + // ReadBarrierSystemArrayCopySlowPathARM64. + temps.Exclude(ip0); Register tmp = temps.AcquireW(); + DCHECK_NE(LocationFrom(tmp).reg(), IP0); + + // /* int32_t */ monitor = src->monitor_ + __ Ldr(tmp, HeapOperand(src.W(), monitor_offset)); + // /* LockWord */ lock_word = LockWord(monitor) + static_assert(sizeof(LockWord) == sizeof(int32_t), + "art::LockWord and int32_t have different sizes."); + + // Introduce a dependency on the lock_word including rb_state, + // to prevent load-load reordering, and without using + // a memory barrier (which would be more expensive). + // `src` is unchanged by this operation, but its value now depends + // on `tmp`. + __ Add(src.X(), src.X(), Operand(tmp.X(), LSR, 32)); + + // Compute base source address, base destination address, and end + // source address for System.arraycopy* intrinsics in `src_base`, + // `dst_base` and `src_end` respectively. + // Note that `src_curr_addr` is computed from `src` (and + // `src_pos`) here, and thus honors the artificial dependency + // of `src` on `tmp`. + GenSystemArrayCopyAddresses(masm, + type, + src, + src_pos, + dest, + dest_pos, + length, + src_curr_addr, + dst_curr_addr, + src_stop_addr); + + // Slow path used to copy array when `src` is gray. + SlowPathCodeARM64* read_barrier_slow_path = + new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARM64(invoke, LocationFrom(tmp)); + codegen_->AddSlowPath(read_barrier_slow_path); + + // Given the numeric representation, it's enough to check the low bit of the rb_state. 
+ static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0"); + static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1"); + __ Tbnz(tmp, LockWord::kReadBarrierStateShift, read_barrier_slow_path->GetEntryLabel()); + + // Fast-path copy. + // Iterate over the arrays and do a raw copy of the objects. We don't need to + // poison/unpoison. + vixl::aarch64::Label loop; + __ Bind(&loop); __ Ldr(tmp, MemOperand(src_curr_addr, element_size, PostIndex)); __ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex)); + __ Cmp(src_curr_addr, src_stop_addr); + __ B(&loop, ne); + + __ Bind(read_barrier_slow_path->GetExitLabel()); + } else { + // Non read barrier code. + // Compute base source address, base destination address, and end + // source address for System.arraycopy* intrinsics in `src_base`, + // `dst_base` and `src_end` respectively. + GenSystemArrayCopyAddresses(masm, + type, + src, + src_pos, + dest, + dest_pos, + length, + src_curr_addr, + dst_curr_addr, + src_stop_addr); + // Iterate over the arrays and do a raw copy of the objects. We don't need to + // poison/unpoison. + vixl::aarch64::Label loop; + __ Bind(&loop); + { + Register tmp = temps.AcquireW(); + __ Ldr(tmp, MemOperand(src_curr_addr, element_size, PostIndex)); + __ Str(tmp, MemOperand(dst_curr_addr, element_size, PostIndex)); + } + __ Cmp(src_curr_addr, src_stop_addr); + __ B(&loop, ne); } - __ B(&loop); __ Bind(&done); } } + // We only need one card marking on the destination array. 
codegen_->MarkGCCard(dest.W(), Register(), /* value_can_be_null */ false); diff --git a/compiler/optimizing/intrinsics_arm_vixl.cc b/compiler/optimizing/intrinsics_arm_vixl.cc index 60bcf2cfd5..19ff49c6ce 100644 --- a/compiler/optimizing/intrinsics_arm_vixl.cc +++ b/compiler/optimizing/intrinsics_arm_vixl.cc @@ -117,6 +117,50 @@ class IntrinsicSlowPathARMVIXL : public SlowPathCodeARMVIXL { DISALLOW_COPY_AND_ASSIGN(IntrinsicSlowPathARMVIXL); }; +// Compute base address for the System.arraycopy intrinsic in `base`. +static void GenSystemArrayCopyBaseAddress(ArmVIXLAssembler* assembler, + Primitive::Type type, + const vixl32::Register& array, + const Location& pos, + const vixl32::Register& base) { + // This routine is only used by the SystemArrayCopy intrinsic at the + // moment. We can allow Primitive::kPrimNot as `type` to implement + // the SystemArrayCopyChar intrinsic. + DCHECK_EQ(type, Primitive::kPrimNot); + const int32_t element_size = Primitive::ComponentSize(type); + const uint32_t element_size_shift = Primitive::ComponentSizeShift(type); + const uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value(); + + if (pos.IsConstant()) { + int32_t constant = Int32ConstantFrom(pos); + __ Add(base, array, element_size * constant + data_offset); + } else { + __ Add(base, array, Operand(RegisterFrom(pos), vixl32::LSL, element_size_shift)); + __ Add(base, base, data_offset); + } +} + +// Compute end address for the System.arraycopy intrinsic in `end`. +static void GenSystemArrayCopyEndAddress(ArmVIXLAssembler* assembler, + Primitive::Type type, + const Location& copy_length, + const vixl32::Register& base, + const vixl32::Register& end) { + // This routine is only used by the SystemArrayCopy intrinsic at the + // moment. We can allow Primitive::kPrimNot as `type` to implement + // the SystemArrayCopyChar intrinsic. 
+ DCHECK_EQ(type, Primitive::kPrimNot); + const int32_t element_size = Primitive::ComponentSize(type); + const uint32_t element_size_shift = Primitive::ComponentSizeShift(type); + + if (copy_length.IsConstant()) { + int32_t constant = Int32ConstantFrom(copy_length); + __ Add(end, base, element_size * constant); + } else { + __ Add(end, base, Operand(RegisterFrom(copy_length), vixl32::LSL, element_size_shift)); + } +} + // Slow path implementing the SystemArrayCopy intrinsic copy loop with read barriers. class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL { public: @@ -137,9 +181,8 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL { DCHECK(instruction_->GetLocations()->Intrinsified()); DCHECK_EQ(instruction_->AsInvoke()->GetIntrinsic(), Intrinsics::kSystemArrayCopy); - int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot); - uint32_t element_size_shift = Primitive::ComponentSizeShift(Primitive::kPrimNot); - uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value(); + Primitive::Type type = Primitive::kPrimNot; + const int32_t element_size = Primitive::ComponentSize(type); vixl32::Register dest = InputRegisterAt(instruction_, 2); Location dest_pos = locations->InAt(3); @@ -150,15 +193,7 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL { __ Bind(GetEntryLabel()); // Compute the base destination address in `dst_curr_addr`. 
- if (dest_pos.IsConstant()) { - int32_t constant = Int32ConstantFrom(dest_pos); - __ Add(dst_curr_addr, dest, element_size * constant + offset); - } else { - __ Add(dst_curr_addr, - dest, - Operand(RegisterFrom(dest_pos), vixl32::LSL, element_size_shift)); - __ Add(dst_curr_addr, dst_curr_addr, offset); - } + GenSystemArrayCopyBaseAddress(assembler, type, dest, dest_pos, dst_curr_addr); vixl32::Label loop; __ Bind(&loop); @@ -182,6 +217,8 @@ class ReadBarrierSystemArrayCopySlowPathARMVIXL : public SlowPathCodeARMVIXL { DCHECK(!src_stop_addr.Is(ip)); DCHECK(!tmp.Is(ip)); DCHECK(tmp.IsRegister()) << tmp; + // TODO: Load the entrypoint once before the loop, instead of + // loading it at every iteration. int32_t entry_point_offset = CodeGenerator::GetReadBarrierMarkEntryPointsOffset<kArmPointerSize>(tmp.GetCode()); // This runtime call does not require a stack map. @@ -2243,143 +2280,116 @@ void IntrinsicCodeGeneratorARMVIXL::VisitSystemArrayCopy(HInvoke* invoke) { __ CompareAndBranchIfNonZero(temp3, intrinsic_slow_path->GetEntryLabel()); } - int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot); - uint32_t element_size_shift = Primitive::ComponentSizeShift(Primitive::kPrimNot); - uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value(); - - // Compute the base source address in `temp1`. - if (src_pos.IsConstant()) { - int32_t constant = Int32ConstantFrom(src_pos); - __ Add(temp1, src, element_size * constant + offset); + if (length.IsConstant() && Int32ConstantFrom(length) == 0) { + // Null constant length: no need to emit the loop code at all. } else { - __ Add(temp1, src, Operand(RegisterFrom(src_pos), vixl32::LSL, element_size_shift)); - __ Add(temp1, temp1, offset); - } - - // Compute the end source address in `temp3`. 
- if (length.IsConstant()) { - int32_t constant = Int32ConstantFrom(length); - __ Add(temp3, temp1, element_size * constant); - } else { - __ Add(temp3, temp1, Operand(RegisterFrom(length), vixl32::LSL, element_size_shift)); - } - - if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { - // TODO: Also convert this intrinsic to the IsGcMarking strategy? - - // The base destination address is computed later, as `temp2` is - // used for intermediate computations. - - // SystemArrayCopy implementation for Baker read barriers (see - // also CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier): - // - // if (src_ptr != end_ptr) { - // uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState(); - // lfence; // Load fence or artificial data dependency to prevent load-load reordering - // bool is_gray = (rb_state == ReadBarrier::GrayState()); - // if (is_gray) { - // // Slow-path copy. - // do { - // *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++))); - // } while (src_ptr != end_ptr) - // } else { - // // Fast-path copy. - // do { - // *dest_ptr++ = *src_ptr++; - // } while (src_ptr != end_ptr) - // } - // } - - vixl32::Label loop, done; - - // Don't enter copy loop if `length == 0`. - __ Cmp(temp1, temp3); - __ B(eq, &done, /* far_target */ false); - - // /* int32_t */ monitor = src->monitor_ - __ Ldr(temp2, MemOperand(src, monitor_offset)); - // /* LockWord */ lock_word = LockWord(monitor) - static_assert(sizeof(LockWord) == sizeof(int32_t), - "art::LockWord and int32_t have different sizes."); - - // Introduce a dependency on the lock_word including the rb_state, - // which shall prevent load-load reordering without using - // a memory barrier (which would be more expensive). - // `src` is unchanged by this operation, but its value now depends - // on `temp2`. - __ Add(src, src, Operand(temp2, vixl32::LSR, 32)); - - // Slow path used to copy array when `src` is gray. 
- SlowPathCodeARMVIXL* read_barrier_slow_path = - new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARMVIXL(invoke); - codegen_->AddSlowPath(read_barrier_slow_path); - - // Given the numeric representation, it's enough to check the low bit of the - // rb_state. We do that by shifting the bit out of the lock word with LSRS - // which can be a 16-bit instruction unlike the TST immediate. - static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0"); - static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1"); - __ Lsrs(temp2, temp2, LockWord::kReadBarrierStateShift + 1); - // Carry flag is the last bit shifted out by LSRS. - __ B(cs, read_barrier_slow_path->GetEntryLabel()); - - // Fast-path copy. - - // Compute the base destination address in `temp2`. - if (dest_pos.IsConstant()) { - int32_t constant = Int32ConstantFrom(dest_pos); - __ Add(temp2, dest, element_size * constant + offset); - } else { - __ Add(temp2, dest, Operand(RegisterFrom(dest_pos), vixl32::LSL, element_size_shift)); - __ Add(temp2, temp2, offset); - } - - // Iterate over the arrays and do a raw copy of the objects. We don't need to - // poison/unpoison. - __ Bind(&loop); - - { - UseScratchRegisterScope temps(assembler->GetVIXLAssembler()); - const vixl32::Register temp_reg = temps.Acquire(); + vixl32::Label done; + const Primitive::Type type = Primitive::kPrimNot; + const int32_t element_size = Primitive::ComponentSize(type); - __ Ldr(temp_reg, MemOperand(temp1, element_size, PostIndex)); - __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex)); + if (length.IsRegister()) { + // Don't enter the copy loop if the length is null. + __ CompareAndBranchIfZero(RegisterFrom(length), &done, /* is_far_target */ false); } - __ Cmp(temp1, temp3); - __ B(ne, &loop, /* far_target */ false); - - __ Bind(read_barrier_slow_path->GetExitLabel()); - __ Bind(&done); - } else { - // Non read barrier code. 
+ if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + // TODO: Also convert this intrinsic to the IsGcMarking strategy? + + // SystemArrayCopy implementation for Baker read barriers (see + // also CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier): + // + // uint32_t rb_state = Lockword(src->monitor_).ReadBarrierState(); + // lfence; // Load fence or artificial data dependency to prevent load-load reordering + // bool is_gray = (rb_state == ReadBarrier::GrayState()); + // if (is_gray) { + // // Slow-path copy. + // do { + // *dest_ptr++ = MaybePoison(ReadBarrier::Mark(MaybeUnpoison(*src_ptr++))); + // } while (src_ptr != end_ptr) + // } else { + // // Fast-path copy. + // do { + // *dest_ptr++ = *src_ptr++; + // } while (src_ptr != end_ptr) + // } + + // /* int32_t */ monitor = src->monitor_ + __ Ldr(temp2, MemOperand(src, monitor_offset)); + // /* LockWord */ lock_word = LockWord(monitor) + static_assert(sizeof(LockWord) == sizeof(int32_t), + "art::LockWord and int32_t have different sizes."); + + // Introduce a dependency on the lock_word including the rb_state, + // which shall prevent load-load reordering without using + // a memory barrier (which would be more expensive). + // `src` is unchanged by this operation, but its value now depends + // on `temp2`. + __ Add(src, src, Operand(temp2, vixl32::LSR, 32)); + + // Compute the base source address in `temp1`. + // Note that `temp1` (the base source address) is computed from + // `src` (and `src_pos`) here, and thus honors the artificial + // dependency of `src` on `temp2`. + GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1); + // Compute the end source address in `temp3`. + GenSystemArrayCopyEndAddress(GetAssembler(), type, length, temp1, temp3); + // The base destination address is computed later, as `temp2` is + // used for intermediate computations. + + // Slow path used to copy array when `src` is gray. 
+ // Note that the base destination address is computed in `temp2` + // by the slow path code. + SlowPathCodeARMVIXL* read_barrier_slow_path = + new (GetAllocator()) ReadBarrierSystemArrayCopySlowPathARMVIXL(invoke); + codegen_->AddSlowPath(read_barrier_slow_path); + + // Given the numeric representation, it's enough to check the low bit of the + // rb_state. We do that by shifting the bit out of the lock word with LSRS + // which can be a 16-bit instruction unlike the TST immediate. + static_assert(ReadBarrier::WhiteState() == 0, "Expecting white to have value 0"); + static_assert(ReadBarrier::GrayState() == 1, "Expecting gray to have value 1"); + __ Lsrs(temp2, temp2, LockWord::kReadBarrierStateShift + 1); + // Carry flag is the last bit shifted out by LSRS. + __ B(cs, read_barrier_slow_path->GetEntryLabel()); + + // Fast-path copy. + // Compute the base destination address in `temp2`. + GenSystemArrayCopyBaseAddress(GetAssembler(), type, dest, dest_pos, temp2); + // Iterate over the arrays and do a raw copy of the objects. We don't need to + // poison/unpoison. + vixl32::Label loop; + __ Bind(&loop); + { + UseScratchRegisterScope temps(assembler->GetVIXLAssembler()); + const vixl32::Register temp_reg = temps.Acquire(); + __ Ldr(temp_reg, MemOperand(temp1, element_size, PostIndex)); + __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex)); + } + __ Cmp(temp1, temp3); + __ B(ne, &loop, /* far_target */ false); - // Compute the base destination address in `temp2`. - if (dest_pos.IsConstant()) { - int32_t constant = Int32ConstantFrom(dest_pos); - __ Add(temp2, dest, element_size * constant + offset); + __ Bind(read_barrier_slow_path->GetExitLabel()); } else { - __ Add(temp2, dest, Operand(RegisterFrom(dest_pos), vixl32::LSL, element_size_shift)); - __ Add(temp2, temp2, offset); - } - - // Iterate over the arrays and do a raw copy of the objects. We don't need to - // poison/unpoison. 
- vixl32::Label loop, done; - __ Cmp(temp1, temp3); - __ B(eq, &done, /* far_target */ false); - __ Bind(&loop); - - { - UseScratchRegisterScope temps(assembler->GetVIXLAssembler()); - const vixl32::Register temp_reg = temps.Acquire(); - - __ Ldr(temp_reg, MemOperand(temp1, element_size, PostIndex)); - __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex)); + // Non read barrier code. + // Compute the base source address in `temp1`. + GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1); + // Compute the base destination address in `temp2`. + GenSystemArrayCopyBaseAddress(GetAssembler(), type, dest, dest_pos, temp2); + // Compute the end source address in `temp3`. + GenSystemArrayCopyEndAddress(GetAssembler(), type, length, temp1, temp3); + // Iterate over the arrays and do a raw copy of the objects. We don't need to + // poison/unpoison. + vixl32::Label loop; + __ Bind(&loop); + { + UseScratchRegisterScope temps(assembler->GetVIXLAssembler()); + const vixl32::Register temp_reg = temps.Acquire(); + __ Ldr(temp_reg, MemOperand(temp1, element_size, PostIndex)); + __ Str(temp_reg, MemOperand(temp2, element_size, PostIndex)); + } + __ Cmp(temp1, temp3); + __ B(ne, &loop, /* far_target */ false); } - - __ Cmp(temp1, temp3); - __ B(ne, &loop, /* far_target */ false); __ Bind(&done); } diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc index a671788ff5..ecf919bceb 100644 --- a/compiler/optimizing/intrinsics_x86.cc +++ b/compiler/optimizing/intrinsics_x86.cc @@ -2878,6 +2878,49 @@ static bool IsSameInput(HInstruction* instruction, size_t input0, size_t input1) return instruction->InputAt(input0) == instruction->InputAt(input1); } +// Compute base address for the System.arraycopy intrinsic in `base`. 
+static void GenSystemArrayCopyBaseAddress(X86Assembler* assembler, + Primitive::Type type, + const Register& array, + const Location& pos, + const Register& base) { + // This routine is only used by the SystemArrayCopy intrinsic at the + // moment. We can allow Primitive::kPrimNot as `type` to implement + // the SystemArrayCopyChar intrinsic. + DCHECK_EQ(type, Primitive::kPrimNot); + const int32_t element_size = Primitive::ComponentSize(type); + const ScaleFactor scale_factor = static_cast<ScaleFactor>(Primitive::ComponentSizeShift(type)); + const uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value(); + + if (pos.IsConstant()) { + int32_t constant = pos.GetConstant()->AsIntConstant()->GetValue(); + __ leal(base, Address(array, element_size * constant + data_offset)); + } else { + __ leal(base, Address(array, pos.AsRegister<Register>(), scale_factor, data_offset)); + } +} + +// Compute end source address for the System.arraycopy intrinsic in `end`. +static void GenSystemArrayCopyEndAddress(X86Assembler* assembler, + Primitive::Type type, + const Location& copy_length, + const Register& base, + const Register& end) { + // This routine is only used by the SystemArrayCopy intrinsic at the + // moment. We can allow Primitive::kPrimNot as `type` to implement + // the SystemArrayCopyChar intrinsic. 
+ DCHECK_EQ(type, Primitive::kPrimNot); + const int32_t element_size = Primitive::ComponentSize(type); + const ScaleFactor scale_factor = static_cast<ScaleFactor>(Primitive::ComponentSizeShift(type)); + + if (copy_length.IsConstant()) { + int32_t constant = copy_length.GetConstant()->AsIntConstant()->GetValue(); + __ leal(end, Address(base, element_size * constant)); + } else { + __ leal(end, Address(base, copy_length.AsRegister<Register>(), scale_factor, 0)); + } +} + void IntrinsicLocationsBuilderX86::VisitSystemArrayCopy(HInvoke* invoke) { // The only read barrier implementation supporting the // SystemArrayCopy intrinsic is the Baker-style read barriers. @@ -3182,16 +3225,11 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) { __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel()); } + const Primitive::Type type = Primitive::kPrimNot; + const int32_t element_size = Primitive::ComponentSize(type); + // Compute the base source address in `temp1`. - int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot); - DCHECK_EQ(element_size, 4); - uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value(); - if (src_pos.IsConstant()) { - int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue(); - __ leal(temp1, Address(src, element_size * constant + offset)); - } else { - __ leal(temp1, Address(src, src_pos.AsRegister<Register>(), ScaleFactor::TIMES_4, offset)); - } + GenSystemArrayCopyBaseAddress(GetAssembler(), type, src, src_pos, temp1); if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // If it is needed (in the case of the fast-path loop), the base @@ -3199,20 +3237,15 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) { // intermediate computations. // Compute the end source address in `temp3`. 
- if (length.IsConstant()) { - int32_t constant = length.GetConstant()->AsIntConstant()->GetValue(); - __ leal(temp3, Address(temp1, element_size * constant)); - } else { - if (length.IsStackSlot()) { - // Location `length` is again pointing at a stack slot, as - // register `temp3` (which was containing the length parameter - // earlier) has been overwritten; restore it now - DCHECK(length.Equals(length_arg)); - __ movl(temp3, Address(ESP, length.GetStackIndex())); - length = Location::RegisterLocation(temp3); - } - __ leal(temp3, Address(temp1, length.AsRegister<Register>(), ScaleFactor::TIMES_4, 0)); + if (length.IsStackSlot()) { + // Location `length` is again pointing at a stack slot, as + // register `temp3` (which was containing the length parameter + // earlier) has been overwritten; restore it now + DCHECK(length.Equals(length_arg)); + __ movl(temp3, Address(ESP, length.GetStackIndex())); + length = Location::RegisterLocation(temp3); } + GenSystemArrayCopyEndAddress(GetAssembler(), type, length, temp1, temp3); // SystemArrayCopy implementation for Baker read barriers (see // also CodeGeneratorX86::GenerateReferenceLoadWithBakerReadBarrier): @@ -3266,15 +3299,8 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) { __ j(kNotZero, read_barrier_slow_path->GetEntryLabel()); // Fast-path copy. - - // Set the base destination address in `temp2`. - if (dest_pos.IsConstant()) { - int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue(); - __ leal(temp2, Address(dest, element_size * constant + offset)); - } else { - __ leal(temp2, Address(dest, dest_pos.AsRegister<Register>(), ScaleFactor::TIMES_4, offset)); - } - + // Compute the base destination address in `temp2`. + GenSystemArrayCopyBaseAddress(GetAssembler(), type, dest, dest_pos, temp2); // Iterate over the arrays and do a raw copy of the objects. We don't need to // poison/unpoison. 
__ Bind(&loop); @@ -3291,23 +3317,10 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) { __ Bind(&done); } else { // Non read barrier code. - // Compute the base destination address in `temp2`. - if (dest_pos.IsConstant()) { - int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue(); - __ leal(temp2, Address(dest, element_size * constant + offset)); - } else { - __ leal(temp2, Address(dest, dest_pos.AsRegister<Register>(), ScaleFactor::TIMES_4, offset)); - } - + GenSystemArrayCopyBaseAddress(GetAssembler(), type, dest, dest_pos, temp2); // Compute the end source address in `temp3`. - if (length.IsConstant()) { - int32_t constant = length.GetConstant()->AsIntConstant()->GetValue(); - __ leal(temp3, Address(temp1, element_size * constant)); - } else { - __ leal(temp3, Address(temp1, length.AsRegister<Register>(), ScaleFactor::TIMES_4, 0)); - } - + GenSystemArrayCopyEndAddress(GetAssembler(), type, length, temp1, temp3); // Iterate over the arrays and do a raw copy of the objects. We don't need to // poison/unpoison. NearLabel loop, done; @@ -3326,11 +3339,7 @@ void IntrinsicCodeGeneratorX86::VisitSystemArrayCopy(HInvoke* invoke) { } // We only need one card marking on the destination array. 
- codegen_->MarkGCCard(temp1, - temp2, - dest, - Register(kNoRegister), - /* value_can_be_null */ false); + codegen_->MarkGCCard(temp1, temp2, dest, Register(kNoRegister), /* value_can_be_null */ false); __ Bind(intrinsic_slow_path->GetExitLabel()); } diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc index 9a6dd985a4..13956dfb8e 100644 --- a/compiler/optimizing/intrinsics_x86_64.cc +++ b/compiler/optimizing/intrinsics_x86_64.cc @@ -1118,6 +1118,47 @@ void IntrinsicLocationsBuilderX86_64::VisitSystemArrayCopy(HInvoke* invoke) { CodeGenerator::CreateSystemArrayCopyLocationSummary(invoke); } +// Compute base source address, base destination address, and end +// source address for the System.arraycopy intrinsic in `src_base`, +// `dst_base` and `src_end` respectively. +static void GenSystemArrayCopyAddresses(X86_64Assembler* assembler, + Primitive::Type type, + const CpuRegister& src, + const Location& src_pos, + const CpuRegister& dst, + const Location& dst_pos, + const Location& copy_length, + const CpuRegister& src_base, + const CpuRegister& dst_base, + const CpuRegister& src_end) { + // This routine is only used by the SystemArrayCopy intrinsic. 
+ DCHECK_EQ(type, Primitive::kPrimNot); + const int32_t element_size = Primitive::ComponentSize(type); + const ScaleFactor scale_factor = static_cast<ScaleFactor>(Primitive::ComponentSizeShift(type)); + const uint32_t data_offset = mirror::Array::DataOffset(element_size).Uint32Value(); + + if (src_pos.IsConstant()) { + int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue(); + __ leal(src_base, Address(src, element_size * constant + data_offset)); + } else { + __ leal(src_base, Address(src, src_pos.AsRegister<CpuRegister>(), scale_factor, data_offset)); + } + + if (dst_pos.IsConstant()) { + int32_t constant = dst_pos.GetConstant()->AsIntConstant()->GetValue(); + __ leal(dst_base, Address(dst, element_size * constant + data_offset)); + } else { + __ leal(dst_base, Address(dst, dst_pos.AsRegister<CpuRegister>(), scale_factor, data_offset)); + } + + if (copy_length.IsConstant()) { + int32_t constant = copy_length.GetConstant()->AsIntConstant()->GetValue(); + __ leal(src_end, Address(src_base, element_size * constant)); + } else { + __ leal(src_end, Address(src_base, copy_length.AsRegister<CpuRegister>(), scale_factor, 0)); + } +} + void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) { // The only read barrier implementation supporting the // SystemArrayCopy intrinsic is the Baker-style read barriers. @@ -1366,30 +1407,13 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) { __ j(kNotEqual, intrinsic_slow_path->GetEntryLabel()); } - // Compute base source address, base destination address, and end source address. 
+ const Primitive::Type type = Primitive::kPrimNot; + const int32_t element_size = Primitive::ComponentSize(type); - int32_t element_size = Primitive::ComponentSize(Primitive::kPrimNot); - uint32_t offset = mirror::Array::DataOffset(element_size).Uint32Value(); - if (src_pos.IsConstant()) { - int32_t constant = src_pos.GetConstant()->AsIntConstant()->GetValue(); - __ leal(temp1, Address(src, element_size * constant + offset)); - } else { - __ leal(temp1, Address(src, src_pos.AsRegister<CpuRegister>(), ScaleFactor::TIMES_4, offset)); - } - - if (dest_pos.IsConstant()) { - int32_t constant = dest_pos.GetConstant()->AsIntConstant()->GetValue(); - __ leal(temp2, Address(dest, element_size * constant + offset)); - } else { - __ leal(temp2, Address(dest, dest_pos.AsRegister<CpuRegister>(), ScaleFactor::TIMES_4, offset)); - } - - if (length.IsConstant()) { - int32_t constant = length.GetConstant()->AsIntConstant()->GetValue(); - __ leal(temp3, Address(temp1, element_size * constant)); - } else { - __ leal(temp3, Address(temp1, length.AsRegister<CpuRegister>(), ScaleFactor::TIMES_4, 0)); - } + // Compute base source address, base destination address, and end + // source address in `temp1`, `temp2` and `temp3` respectively. + GenSystemArrayCopyAddresses( + GetAssembler(), type, src, src_pos, dest, dest_pos, length, temp1, temp2, temp3); if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { // SystemArrayCopy implementation for Baker read barriers (see @@ -1474,11 +1498,7 @@ void IntrinsicCodeGeneratorX86_64::VisitSystemArrayCopy(HInvoke* invoke) { } // We only need one card marking on the destination array. 
- codegen_->MarkGCCard(temp1, - temp2, - dest, - CpuRegister(kNoRegister), - /* value_can_be_null */ false); + codegen_->MarkGCCard(temp1, temp2, dest, CpuRegister(kNoRegister), /* value_can_be_null */ false); __ Bind(intrinsic_slow_path->GetExitLabel()); } @@ -3018,13 +3038,14 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) { mirror::Object* boxed = info.cache->Get(value + (-info.low)); DCHECK(boxed != nullptr && Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(boxed)); uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(boxed)); - __ movl(out, Immediate(address)); + __ movl(out, Immediate(static_cast<int32_t>(address))); } else { // Allocate and initialize a new j.l.Integer. // TODO: If we JIT, we could allocate the j.l.Integer now, and store it in the // JIT object table. + CpuRegister argument = CpuRegister(calling_convention.GetRegisterAt(0)); uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer)); - __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(address)); + __ movl(argument, Immediate(static_cast<int32_t>(address))); codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); __ movl(Address(out, info.value_offset), Immediate(value)); @@ -3039,13 +3060,20 @@ void IntrinsicCodeGeneratorX86_64::VisitIntegerValueOf(HInvoke* invoke) { // If the value is within the bounds, load the j.l.Integer directly from the array. 
uint32_t data_offset = mirror::Array::DataOffset(kHeapReferenceSize).Uint32Value(); uint32_t address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.cache)); - __ movl(out, Address(out, TIMES_4, data_offset + address)); + if (data_offset + address <= std::numeric_limits<int32_t>::max()) { + __ movl(out, Address(out, TIMES_4, data_offset + address)); + } else { + CpuRegister temp = CpuRegister(calling_convention.GetRegisterAt(0)); + __ movl(temp, Immediate(static_cast<int32_t>(data_offset + address))); + __ movl(out, Address(temp, out, TIMES_4, 0)); + } __ MaybeUnpoisonHeapReference(out); __ jmp(&done); __ Bind(&allocate); // Otherwise allocate and initialize a new j.l.Integer. + CpuRegister argument = CpuRegister(calling_convention.GetRegisterAt(0)); address = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(info.integer)); - __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(address)); + __ movl(argument, Immediate(static_cast<int32_t>(address))); codegen_->InvokeRuntime(kQuickAllocObjectInitialized, invoke, invoke->GetDexPc()); CheckEntrypointTypes<kQuickAllocObjectWithChecks, void*, mirror::Class*>(); __ movl(Address(out, info.value_offset), in); diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index d6153b091c..23ccd9e953 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -856,8 +856,15 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena, const DexFile::CodeItem* code_item) const { ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen); ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps)); - stack_map.resize(codegen->ComputeStackMapsSize()); - codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()), *code_item); + ArenaVector<uint8_t> method_info(arena->Adapter(kArenaAllocStackMaps)); + size_t stack_map_size = 0; + size_t method_info_size 
= 0; + codegen->ComputeStackMapAndMethodInfoSize(&stack_map_size, &method_info_size); + stack_map.resize(stack_map_size); + method_info.resize(method_info_size); + codegen->BuildStackMaps(MemoryRegion(stack_map.data(), stack_map.size()), + MemoryRegion(method_info.data(), method_info.size()), + *code_item); CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod( compiler_driver, @@ -869,7 +876,7 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena, codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(), codegen->GetCoreSpillMask(), codegen->GetFpuSpillMask(), - ArrayRef<const SrcMapElem>(), + ArrayRef<const uint8_t>(method_info), ArrayRef<const uint8_t>(stack_map), ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()), ArrayRef<const LinkerPatch>(linker_patches)); @@ -1200,7 +1207,9 @@ bool OptimizingCompiler::JitCompile(Thread* self, } } - size_t stack_map_size = codegen->ComputeStackMapsSize(); + size_t stack_map_size = 0; + size_t method_info_size = 0; + codegen->ComputeStackMapAndMethodInfoSize(&stack_map_size, &method_info_size); size_t number_of_roots = codegen->GetNumberOfJitRoots(); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); // We allocate an object array to ensure the JIT roots that we will collect in EmitJitRoots @@ -1216,20 +1225,30 @@ bool OptimizingCompiler::JitCompile(Thread* self, return false; } uint8_t* stack_map_data = nullptr; + uint8_t* method_info_data = nullptr; uint8_t* roots_data = nullptr; - uint32_t data_size = code_cache->ReserveData( - self, stack_map_size, number_of_roots, method, &stack_map_data, &roots_data); + uint32_t data_size = code_cache->ReserveData(self, + stack_map_size, + method_info_size, + number_of_roots, + method, + &stack_map_data, + &method_info_data, + &roots_data); if (stack_map_data == nullptr || roots_data == nullptr) { return false; } MaybeRecordStat(MethodCompilationStat::kCompiled); - codegen->BuildStackMaps(MemoryRegion(stack_map_data, 
stack_map_size), *code_item); + codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size), + MemoryRegion(method_info_data, method_info_size), + *code_item); codegen->EmitJitRoots(code_allocator.GetData(), roots, roots_data); const void* code = code_cache->CommitCode( self, method, stack_map_data, + method_info_data, roots_data, codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(), codegen->GetCoreSpillMask(), diff --git a/compiler/optimizing/register_allocation_resolver.cc b/compiler/optimizing/register_allocation_resolver.cc index 59523a93a0..8a9c1ccaff 100644 --- a/compiler/optimizing/register_allocation_resolver.cc +++ b/compiler/optimizing/register_allocation_resolver.cc @@ -306,7 +306,7 @@ void RegisterAllocationResolver::ConnectSiblings(LiveInterval* interval) { : Location::StackSlot(interval->GetParent()->GetSpillSlot())); } UsePosition* use = current->GetFirstUse(); - UsePosition* env_use = current->GetFirstEnvironmentUse(); + EnvUsePosition* env_use = current->GetFirstEnvironmentUse(); // Walk over all siblings, updating locations of use positions, and // connecting them when they are adjacent. @@ -323,7 +323,6 @@ void RegisterAllocationResolver::ConnectSiblings(LiveInterval* interval) { use = use->GetNext(); } while (use != nullptr && use->GetPosition() <= range->GetEnd()) { - DCHECK(!use->GetIsEnvironment()); DCHECK(current->CoversSlow(use->GetPosition()) || (use->GetPosition() == range->GetEnd())); if (!use->IsSynthesized()) { LocationSummary* locations = use->GetUser()->GetLocations(); diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc index 2227872f76..667afb1ec3 100644 --- a/compiler/optimizing/register_allocator_test.cc +++ b/compiler/optimizing/register_allocator_test.cc @@ -912,9 +912,9 @@ TEST_F(RegisterAllocatorTest, SpillInactive) { // Create an interval with lifetime holes. 
static constexpr size_t ranges1[][2] = {{0, 2}, {4, 6}, {8, 10}}; LiveInterval* first = BuildInterval(ranges1, arraysize(ranges1), &allocator, -1, one); - first->first_use_ = new(&allocator) UsePosition(user, 0, false, 8, first->first_use_); - first->first_use_ = new(&allocator) UsePosition(user, 0, false, 7, first->first_use_); - first->first_use_ = new(&allocator) UsePosition(user, 0, false, 6, first->first_use_); + first->first_use_ = new(&allocator) UsePosition(user, false, 8, first->first_use_); + first->first_use_ = new(&allocator) UsePosition(user, false, 7, first->first_use_); + first->first_use_ = new(&allocator) UsePosition(user, false, 6, first->first_use_); locations = new (&allocator) LocationSummary(first->GetDefinedBy(), LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); @@ -934,9 +934,9 @@ TEST_F(RegisterAllocatorTest, SpillInactive) { // before lifetime position 6 yet. static constexpr size_t ranges3[][2] = {{2, 4}, {8, 10}}; LiveInterval* third = BuildInterval(ranges3, arraysize(ranges3), &allocator, -1, three); - third->first_use_ = new(&allocator) UsePosition(user, 0, false, 8, third->first_use_); - third->first_use_ = new(&allocator) UsePosition(user, 0, false, 4, third->first_use_); - third->first_use_ = new(&allocator) UsePosition(user, 0, false, 3, third->first_use_); + third->first_use_ = new(&allocator) UsePosition(user, false, 8, third->first_use_); + third->first_use_ = new(&allocator) UsePosition(user, false, 4, third->first_use_); + third->first_use_ = new(&allocator) UsePosition(user, false, 3, third->first_use_); locations = new (&allocator) LocationSummary(third->GetDefinedBy(), LocationSummary::kNoCall); locations->SetOut(Location::RequiresRegister()); third = third->SplitAt(3); diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h index a239bd50c2..340d0ccefe 100644 --- a/compiler/optimizing/ssa_liveness_analysis.h +++ 
b/compiler/optimizing/ssa_liveness_analysis.h @@ -17,9 +17,10 @@ #ifndef ART_COMPILER_OPTIMIZING_SSA_LIVENESS_ANALYSIS_H_ #define ART_COMPILER_OPTIMIZING_SSA_LIVENESS_ANALYSIS_H_ -#include "nodes.h" #include <iostream> +#include "nodes.h" + namespace art { class CodeGenerator; @@ -103,21 +104,20 @@ class LiveRange FINAL : public ArenaObject<kArenaAllocSsaLiveness> { */ class UsePosition : public ArenaObject<kArenaAllocSsaLiveness> { public: - UsePosition(HInstruction* user, - HEnvironment* environment, - size_t input_index, - size_t position, - UsePosition* next) + UsePosition(HInstruction* user, size_t input_index, size_t position, UsePosition* next) : user_(user), - environment_(environment), input_index_(input_index), position_(position), next_(next) { - DCHECK(environment == nullptr || user == nullptr); DCHECK(next_ == nullptr || next->GetPosition() >= GetPosition()); } - static constexpr size_t kNoInput = -1; + explicit UsePosition(size_t position) + : user_(nullptr), + input_index_(kNoInput), + position_(dchecked_integral_cast<uint32_t>(position)), + next_(nullptr) { + } size_t GetPosition() const { return position_; } @@ -125,9 +125,7 @@ class UsePosition : public ArenaObject<kArenaAllocSsaLiveness> { void SetNext(UsePosition* next) { next_ = next; } HInstruction* GetUser() const { return user_; } - HEnvironment* GetEnvironment() const { return environment_; } - bool GetIsEnvironment() const { return environment_ != nullptr; } bool IsSynthesized() const { return user_ == nullptr; } size_t GetInputIndex() const { return input_index_; } @@ -142,20 +140,20 @@ class UsePosition : public ArenaObject<kArenaAllocSsaLiveness> { UsePosition* Dup(ArenaAllocator* allocator) const { return new (allocator) UsePosition( - user_, environment_, input_index_, position_, + user_, input_index_, position_, next_ == nullptr ? 
nullptr : next_->Dup(allocator)); } bool RequiresRegister() const { - if (GetIsEnvironment()) return false; if (IsSynthesized()) return false; Location location = GetUser()->GetLocations()->InAt(GetInputIndex()); return location.IsUnallocated() && location.RequiresRegisterKind(); } private: + static constexpr uint32_t kNoInput = static_cast<uint32_t>(-1); + HInstruction* const user_; - HEnvironment* const environment_; const size_t input_index_; const size_t position_; UsePosition* next_; @@ -163,6 +161,50 @@ class UsePosition : public ArenaObject<kArenaAllocSsaLiveness> { DISALLOW_COPY_AND_ASSIGN(UsePosition); }; +/** + * An environment use position represents a live interval for environment use at a given position. + */ +class EnvUsePosition : public ArenaObject<kArenaAllocSsaLiveness> { + public: + EnvUsePosition(HEnvironment* environment, + size_t input_index, + size_t position, + EnvUsePosition* next) + : environment_(environment), + input_index_(input_index), + position_(position), + next_(next) { + DCHECK(environment != nullptr); + DCHECK(next_ == nullptr || next->GetPosition() >= GetPosition()); + } + + size_t GetPosition() const { return position_; } + + EnvUsePosition* GetNext() const { return next_; } + void SetNext(EnvUsePosition* next) { next_ = next; } + + HEnvironment* GetEnvironment() const { return environment_; } + size_t GetInputIndex() const { return input_index_; } + + void Dump(std::ostream& stream) const { + stream << position_; + } + + EnvUsePosition* Dup(ArenaAllocator* allocator) const { + return new (allocator) EnvUsePosition( + environment_, input_index_, position_, + next_ == nullptr ? 
nullptr : next_->Dup(allocator)); + } + + private: + HEnvironment* const environment_; + const size_t input_index_; + const size_t position_; + EnvUsePosition* next_; + + DISALLOW_COPY_AND_ASSIGN(EnvUsePosition); +}; + class SafepointPosition : public ArenaObject<kArenaAllocSsaLiveness> { public: explicit SafepointPosition(HInstruction* instruction) @@ -227,7 +269,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { DCHECK(first_env_use_ == nullptr) << "A temporary cannot have environment user"; size_t position = instruction->GetLifetimePosition(); first_use_ = new (allocator_) UsePosition( - instruction, /* environment */ nullptr, temp_index, position, first_use_); + instruction, temp_index, position, first_use_); AddRange(position, position + 1); } @@ -276,7 +318,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { } DCHECK(first_use_->GetPosition() + 1 == position); UsePosition* new_use = new (allocator_) UsePosition( - instruction, nullptr /* environment */, input_index, position, cursor->GetNext()); + instruction, input_index, position, cursor->GetNext()); cursor->SetNext(new_use); if (first_range_->GetEnd() == first_use_->GetPosition()) { first_range_->end_ = position; @@ -285,11 +327,11 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { } if (is_environment) { - first_env_use_ = new (allocator_) UsePosition( - nullptr /* instruction */, environment, input_index, position, first_env_use_); + first_env_use_ = new (allocator_) EnvUsePosition( + environment, input_index, position, first_env_use_); } else { first_use_ = new (allocator_) UsePosition( - instruction, nullptr /* environment */, input_index, position, first_use_); + instruction, input_index, position, first_use_); } if (is_environment && !keep_alive) { @@ -328,7 +370,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { AddBackEdgeUses(*block); } first_use_ = new (allocator_) UsePosition( - instruction, /* environment */ nullptr, 
input_index, block->GetLifetimeEnd(), first_use_); + instruction, input_index, block->GetLifetimeEnd(), first_use_); } ALWAYS_INLINE void AddRange(size_t start, size_t end) { @@ -538,7 +580,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { return first_use_; } - UsePosition* GetFirstEnvironmentUse() const { + EnvUsePosition* GetFirstEnvironmentUse() const { return first_env_use_; } @@ -676,7 +718,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { current = current->GetNext(); } stream << "}, uses: { "; - UsePosition* use = first_use_; + const UsePosition* use = first_use_; if (use != nullptr) { do { use->Dump(stream); @@ -684,12 +726,12 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { } while ((use = use->GetNext()) != nullptr); } stream << "}, { "; - use = first_env_use_; - if (use != nullptr) { + const EnvUsePosition* env_use = first_env_use_; + if (env_use != nullptr) { do { - use->Dump(stream); + env_use->Dump(stream); stream << " "; - } while ((use = use->GetNext()) != nullptr); + } while ((env_use = env_use->GetNext()) != nullptr); } stream << "}"; stream << " is_fixed: " << is_fixed_ << ", is_split: " << IsSplit(); @@ -1015,12 +1057,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { DCHECK(last_in_new_list == nullptr || back_edge_use_position > last_in_new_list->GetPosition()); - UsePosition* new_use = new (allocator_) UsePosition( - /* user */ nullptr, - /* environment */ nullptr, - UsePosition::kNoInput, - back_edge_use_position, - /* next */ nullptr); + UsePosition* new_use = new (allocator_) UsePosition(back_edge_use_position); if (last_in_new_list != nullptr) { // Going outward. The latest created use needs to point to the new use. @@ -1056,7 +1093,7 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { // Uses of this interval. Note that this linked list is shared amongst siblings. 
UsePosition* first_use_; - UsePosition* first_env_use_; + EnvUsePosition* first_env_use_; // The instruction type this interval corresponds to. const Primitive::Type type_; @@ -1210,8 +1247,7 @@ class SsaLivenessAnalysis : public ValueObject { // Returns whether `instruction` in an HEnvironment held by `env_holder` // should be kept live by the HEnvironment. - static bool ShouldBeLiveForEnvironment(HInstruction* env_holder, - HInstruction* instruction) { + static bool ShouldBeLiveForEnvironment(HInstruction* env_holder, HInstruction* instruction) { if (instruction == nullptr) return false; // A value that's not live in compiled code may still be needed in interpreter, // due to code motion, etc. diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc new file mode 100644 index 0000000000..1916c73ca4 --- /dev/null +++ b/compiler/optimizing/ssa_liveness_analysis_test.cc @@ -0,0 +1,232 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "arch/instruction_set.h" +#include "arch/instruction_set_features.h" +#include "base/arena_allocator.h" +#include "base/arena_containers.h" +#include "driver/compiler_options.h" +#include "code_generator.h" +#include "nodes.h" +#include "optimizing_unit_test.h" +#include "ssa_liveness_analysis.h" + +namespace art { + +class SsaLivenessAnalysisTest : public testing::Test { + public: + SsaLivenessAnalysisTest() + : pool_(), + allocator_(&pool_), + graph_(CreateGraph(&allocator_)), + compiler_options_(), + instruction_set_(kRuntimeISA) { + std::string error_msg; + instruction_set_features_ = + InstructionSetFeatures::FromVariant(instruction_set_, "default", &error_msg); + codegen_ = CodeGenerator::Create(graph_, + instruction_set_, + *instruction_set_features_, + compiler_options_); + CHECK(codegen_ != nullptr) << instruction_set_ << " is not a supported target architecture."; + // Create entry block. + entry_ = new (&allocator_) HBasicBlock(graph_); + graph_->AddBlock(entry_); + graph_->SetEntryBlock(entry_); + } + + protected: + HBasicBlock* CreateSuccessor(HBasicBlock* block) { + HGraph* graph = block->GetGraph(); + HBasicBlock* successor = new (&allocator_) HBasicBlock(graph); + graph->AddBlock(successor); + block->AddSuccessor(successor); + return successor; + } + + ArenaPool pool_; + ArenaAllocator allocator_; + HGraph* graph_; + CompilerOptions compiler_options_; + InstructionSet instruction_set_; + std::unique_ptr<const InstructionSetFeatures> instruction_set_features_; + std::unique_ptr<CodeGenerator> codegen_; + HBasicBlock* entry_; +}; + +TEST_F(SsaLivenessAnalysisTest, TestReturnArg) { + HInstruction* arg = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimInt); + entry_->AddInstruction(arg); + + HBasicBlock* block = CreateSuccessor(entry_); + HInstruction* ret = new (&allocator_) HReturn(arg); + block->AddInstruction(ret); + block->AddInstruction(new (&allocator_) HExit()); + + 
graph_->BuildDominatorTree(); + SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get()); + ssa_analysis.Analyze(); + + std::ostringstream arg_dump; + arg->GetLiveInterval()->Dump(arg_dump); + EXPECT_STREQ("ranges: { [2,6) }, uses: { 6 }, { } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0", + arg_dump.str().c_str()); +} + +TEST_F(SsaLivenessAnalysisTest, TestAput) { + HInstruction* array = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + HInstruction* index = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(1), 1, Primitive::kPrimInt); + HInstruction* value = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(2), 2, Primitive::kPrimInt); + HInstruction* extra_arg1 = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(3), 3, Primitive::kPrimInt); + HInstruction* extra_arg2 = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(4), 4, Primitive::kPrimNot); + ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 }, + allocator_.Adapter()); + for (HInstruction* insn : args) { + entry_->AddInstruction(insn); + } + + HBasicBlock* block = CreateSuccessor(entry_); + HInstruction* null_check = new (&allocator_) HNullCheck(array, 0); + block->AddInstruction(null_check); + HEnvironment* null_check_env = new (&allocator_) HEnvironment(&allocator_, + /* number_of_vregs */ 5, + /* method */ nullptr, + /* dex_pc */ 0u, + null_check); + null_check_env->CopyFrom(args); + null_check->SetRawEnvironment(null_check_env); + HInstruction* length = new (&allocator_) HArrayLength(array, 0); + block->AddInstruction(length); + HInstruction* bounds_check = new (&allocator_) HBoundsCheck(index, length, /* dex_pc */ 0u); + block->AddInstruction(bounds_check); + HEnvironment* bounds_check_env = new (&allocator_) HEnvironment(&allocator_, + /* number_of_vregs */ 5, + /* method */ nullptr, + /* dex_pc */ 0u, + bounds_check); + 
bounds_check_env->CopyFrom(args); + bounds_check->SetRawEnvironment(bounds_check_env); + HInstruction* array_set = + new (&allocator_) HArraySet(array, index, value, Primitive::kPrimInt, /* dex_pc */ 0); + block->AddInstruction(array_set); + + graph_->BuildDominatorTree(); + SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get()); + ssa_analysis.Analyze(); + + EXPECT_FALSE(graph_->IsDebuggable()); + EXPECT_EQ(18u, bounds_check->GetLifetimePosition()); + static const char* const expected[] = { + "ranges: { [2,21) }, uses: { 15 17 21 }, { 15 19 } is_fixed: 0, is_split: 0 is_low: 0 " + "is_high: 0", + "ranges: { [4,21) }, uses: { 19 21 }, { 15 19 } is_fixed: 0, is_split: 0 is_low: 0 " + "is_high: 0", + "ranges: { [6,21) }, uses: { 21 }, { 15 19 } is_fixed: 0, is_split: 0 is_low: 0 " + "is_high: 0", + // Environment uses do not keep the non-reference argument alive. + "ranges: { [8,10) }, uses: { }, { 15 19 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0", + // Environment uses keep the reference argument alive. 
+ "ranges: { [10,19) }, uses: { }, { 15 19 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0", + }; + ASSERT_EQ(arraysize(expected), args.size()); + size_t arg_index = 0u; + for (HInstruction* arg : args) { + std::ostringstream arg_dump; + arg->GetLiveInterval()->Dump(arg_dump); + EXPECT_STREQ(expected[arg_index], arg_dump.str().c_str()) << arg_index; + ++arg_index; + } +} + +TEST_F(SsaLivenessAnalysisTest, TestDeoptimize) { + HInstruction* array = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(0), 0, Primitive::kPrimNot); + HInstruction* index = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(1), 1, Primitive::kPrimInt); + HInstruction* value = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(2), 2, Primitive::kPrimInt); + HInstruction* extra_arg1 = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(3), 3, Primitive::kPrimInt); + HInstruction* extra_arg2 = new (&allocator_) HParameterValue( + graph_->GetDexFile(), dex::TypeIndex(4), 4, Primitive::kPrimNot); + ArenaVector<HInstruction*> args({ array, index, value, extra_arg1, extra_arg2 }, + allocator_.Adapter()); + for (HInstruction* insn : args) { + entry_->AddInstruction(insn); + } + + HBasicBlock* block = CreateSuccessor(entry_); + HInstruction* null_check = new (&allocator_) HNullCheck(array, 0); + block->AddInstruction(null_check); + HEnvironment* null_check_env = new (&allocator_) HEnvironment(&allocator_, + /* number_of_vregs */ 5, + /* method */ nullptr, + /* dex_pc */ 0u, + null_check); + null_check_env->CopyFrom(args); + null_check->SetRawEnvironment(null_check_env); + HInstruction* length = new (&allocator_) HArrayLength(array, 0); + block->AddInstruction(length); + // Use HAboveOrEqual+HDeoptimize as the bounds check. 
+ HInstruction* ae = new (&allocator_) HAboveOrEqual(index, length); + block->AddInstruction(ae); + HInstruction* deoptimize = new(&allocator_) HDeoptimize(ae, /* dex_pc */ 0u); + block->AddInstruction(deoptimize); + HEnvironment* deoptimize_env = new (&allocator_) HEnvironment(&allocator_, + /* number_of_vregs */ 5, + /* method */ nullptr, + /* dex_pc */ 0u, + deoptimize); + deoptimize_env->CopyFrom(args); + deoptimize->SetRawEnvironment(deoptimize_env); + HInstruction* array_set = + new (&allocator_) HArraySet(array, index, value, Primitive::kPrimInt, /* dex_pc */ 0); + block->AddInstruction(array_set); + + graph_->BuildDominatorTree(); + SsaLivenessAnalysis ssa_analysis(graph_, codegen_.get()); + ssa_analysis.Analyze(); + + EXPECT_FALSE(graph_->IsDebuggable()); + EXPECT_EQ(20u, deoptimize->GetLifetimePosition()); + static const char* const expected[] = { + "ranges: { [2,23) }, uses: { 15 17 23 }, { 15 21 } is_fixed: 0, is_split: 0 is_low: 0 " + "is_high: 0", + "ranges: { [4,23) }, uses: { 19 23 }, { 15 21 } is_fixed: 0, is_split: 0 is_low: 0 " + "is_high: 0", + "ranges: { [6,23) }, uses: { 23 }, { 15 21 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0", + // Environment use in HDeoptimize keeps even the non-reference argument alive. + "ranges: { [8,21) }, uses: { }, { 15 21 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0", + // Environment uses keep the reference argument alive. 
+ "ranges: { [10,21) }, uses: { }, { 15 21 } is_fixed: 0, is_split: 0 is_low: 0 is_high: 0", + }; + ASSERT_EQ(arraysize(expected), args.size()); + size_t arg_index = 0u; + for (HInstruction* arg : args) { + std::ostringstream arg_dump; + arg->GetLiveInterval()->Dump(arg_dump); + EXPECT_STREQ(expected[arg_index], arg_dump.str().c_str()) << arg_index; + ++arg_index; + } +} + +} // namespace art diff --git a/compiler/optimizing/stack_map_stream.cc b/compiler/optimizing/stack_map_stream.cc index 4d12ad6eb6..b7840d73db 100644 --- a/compiler/optimizing/stack_map_stream.cc +++ b/compiler/optimizing/stack_map_stream.cc @@ -152,6 +152,9 @@ size_t StackMapStream::PrepareForFillIn() { encoding.location_catalog.num_entries = location_catalog_entries_.size(); encoding.location_catalog.num_bytes = ComputeDexRegisterLocationCatalogSize(); encoding.inline_info.num_entries = inline_infos_.size(); + // Must be done before calling ComputeInlineInfoEncoding since ComputeInlineInfoEncoding requires + // dex_method_index_idx to be filled in. 
+ PrepareMethodIndices(); ComputeInlineInfoEncoding(&encoding.inline_info.encoding, encoding.dex_register_map.num_bytes); CodeOffset max_native_pc_offset = ComputeMaxNativePcCodeOffset(); @@ -245,7 +248,7 @@ void StackMapStream::ComputeInlineInfoEncoding(InlineInfoEncoding* encoding, for (size_t j = 0; j < entry.inlining_depth; ++j) { InlineInfoEntry inline_entry = inline_infos_[inline_info_index++]; if (inline_entry.method == nullptr) { - method_index_max = std::max(method_index_max, inline_entry.method_index); + method_index_max = std::max(method_index_max, inline_entry.dex_method_index_idx); extra_data_max = std::max(extra_data_max, 1u); } else { method_index_max = std::max( @@ -288,7 +291,25 @@ size_t StackMapStream::MaybeCopyDexRegisterMap(DexRegisterMapEntry& entry, return entry.offset; } -void StackMapStream::FillIn(MemoryRegion region) { +void StackMapStream::FillInMethodInfo(MemoryRegion region) { + { + MethodInfo info(region.begin(), method_indices_.size()); + for (size_t i = 0; i < method_indices_.size(); ++i) { + info.SetMethodIndex(i, method_indices_[i]); + } + } + if (kIsDebugBuild) { + // Check the data matches. 
+ MethodInfo info(region.begin()); + const size_t count = info.NumMethodIndices(); + DCHECK_EQ(count, method_indices_.size()); + for (size_t i = 0; i < count; ++i) { + DCHECK_EQ(info.GetMethodIndex(i), method_indices_[i]); + } + } +} + +void StackMapStream::FillInCodeInfo(MemoryRegion region) { DCHECK_EQ(0u, current_entry_.dex_pc) << "EndStackMapEntry not called after BeginStackMapEntry"; DCHECK_NE(0u, needed_size_) << "PrepareForFillIn not called before FillIn"; @@ -345,7 +366,7 @@ void StackMapStream::FillIn(MemoryRegion region) { InvokeInfo invoke_info(code_info.GetInvokeInfo(encoding, invoke_info_idx)); invoke_info.SetNativePcCodeOffset(encoding.invoke_info.encoding, entry.native_pc_code_offset); invoke_info.SetInvokeType(encoding.invoke_info.encoding, entry.invoke_type); - invoke_info.SetMethodIndex(encoding.invoke_info.encoding, entry.dex_method_index); + invoke_info.SetMethodIndexIdx(encoding.invoke_info.encoding, entry.dex_method_index_idx); ++invoke_info_idx; } @@ -364,7 +385,7 @@ void StackMapStream::FillIn(MemoryRegion region) { for (size_t depth = 0; depth < entry.inlining_depth; ++depth) { InlineInfoEntry inline_entry = inline_infos_[depth + entry.inline_infos_start_index]; if (inline_entry.method != nullptr) { - inline_info.SetMethodIndexAtDepth( + inline_info.SetMethodIndexIdxAtDepth( encoding.inline_info.encoding, depth, High32Bits(reinterpret_cast<uintptr_t>(inline_entry.method))); @@ -373,9 +394,9 @@ void StackMapStream::FillIn(MemoryRegion region) { depth, Low32Bits(reinterpret_cast<uintptr_t>(inline_entry.method))); } else { - inline_info.SetMethodIndexAtDepth(encoding.inline_info.encoding, - depth, - inline_entry.method_index); + inline_info.SetMethodIndexIdxAtDepth(encoding.inline_info.encoding, + depth, + inline_entry.dex_method_index_idx); inline_info.SetExtraDataAtDepth(encoding.inline_info.encoding, depth, 1); } inline_info.SetDexPcAtDepth(encoding.inline_info.encoding, depth, inline_entry.dex_pc); @@ -533,6 +554,29 @@ size_t 
StackMapStream::PrepareRegisterMasks() { return dedupe.size(); } +void StackMapStream::PrepareMethodIndices() { + CHECK(method_indices_.empty()); + method_indices_.resize(stack_maps_.size() + inline_infos_.size()); + ArenaUnorderedMap<uint32_t, size_t> dedupe(allocator_->Adapter(kArenaAllocStackMapStream)); + for (StackMapEntry& stack_map : stack_maps_) { + const size_t index = dedupe.size(); + const uint32_t method_index = stack_map.dex_method_index; + if (method_index != DexFile::kDexNoIndex) { + stack_map.dex_method_index_idx = dedupe.emplace(method_index, index).first->second; + method_indices_[index] = method_index; + } + } + for (InlineInfoEntry& inline_info : inline_infos_) { + const size_t index = dedupe.size(); + const uint32_t method_index = inline_info.method_index; + CHECK_NE(method_index, DexFile::kDexNoIndex); + inline_info.dex_method_index_idx = dedupe.emplace(method_index, index).first->second; + method_indices_[index] = method_index; + } + method_indices_.resize(dedupe.size()); +} + + size_t StackMapStream::PrepareStackMasks(size_t entry_size_in_bits) { // Preallocate memory since we do not want it to move (the dedup map will point into it). 
const size_t byte_entry_size = RoundUp(entry_size_in_bits, kBitsPerByte) / kBitsPerByte; @@ -590,7 +634,8 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const { DCHECK_EQ(invoke_info.GetNativePcOffset(encoding.invoke_info.encoding, instruction_set_), entry.native_pc_code_offset.Uint32Value(instruction_set_)); DCHECK_EQ(invoke_info.GetInvokeType(encoding.invoke_info.encoding), entry.invoke_type); - DCHECK_EQ(invoke_info.GetMethodIndex(encoding.invoke_info.encoding), entry.dex_method_index); + DCHECK_EQ(invoke_info.GetMethodIndexIdx(encoding.invoke_info.encoding), + entry.dex_method_index_idx); invoke_info_index++; } CheckDexRegisterMap(code_info, @@ -615,8 +660,10 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const { DCHECK_EQ(inline_info.GetArtMethodAtDepth(encoding.inline_info.encoding, d), inline_entry.method); } else { - DCHECK_EQ(inline_info.GetMethodIndexAtDepth(encoding.inline_info.encoding, d), - inline_entry.method_index); + const size_t method_index_idx = + inline_info.GetMethodIndexIdxAtDepth(encoding.inline_info.encoding, d); + DCHECK_EQ(method_index_idx, inline_entry.dex_method_index_idx); + DCHECK_EQ(method_indices_[method_index_idx], inline_entry.method_index); } CheckDexRegisterMap(code_info, @@ -633,4 +680,9 @@ void StackMapStream::CheckCodeInfo(MemoryRegion region) const { } } +size_t StackMapStream::ComputeMethodInfoSize() const { + DCHECK_NE(0u, needed_size_) << "PrepareForFillIn not called before " << __FUNCTION__; + return MethodInfo::ComputeSize(method_indices_.size()); +} + } // namespace art diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h index 4225a875b9..e6471e1bc5 100644 --- a/compiler/optimizing/stack_map_stream.h +++ b/compiler/optimizing/stack_map_stream.h @@ -22,6 +22,7 @@ #include "base/hash_map.h" #include "base/value_object.h" #include "memory_region.h" +#include "method_info.h" #include "nodes.h" #include "stack_map.h" @@ -70,6 +71,7 @@ class StackMapStream : 
public ValueObject { inline_infos_(allocator->Adapter(kArenaAllocStackMapStream)), stack_masks_(allocator->Adapter(kArenaAllocStackMapStream)), register_masks_(allocator->Adapter(kArenaAllocStackMapStream)), + method_indices_(allocator->Adapter(kArenaAllocStackMapStream)), dex_register_entries_(allocator->Adapter(kArenaAllocStackMapStream)), stack_mask_max_(-1), dex_pc_max_(0), @@ -120,6 +122,7 @@ class StackMapStream : public ValueObject { size_t dex_register_map_index; InvokeType invoke_type; uint32_t dex_method_index; + uint32_t dex_method_index_idx; // Index into dex method index table. }; struct InlineInfoEntry { @@ -128,6 +131,7 @@ class StackMapStream : public ValueObject { uint32_t method_index; DexRegisterMapEntry dex_register_entry; size_t dex_register_map_index; + uint32_t dex_method_index_idx; // Index into the dex method index table. }; void BeginStackMapEntry(uint32_t dex_pc, @@ -164,7 +168,10 @@ class StackMapStream : public ValueObject { // Prepares the stream to fill in a memory region. Must be called before FillIn. // Returns the size (in bytes) needed to store this stream. size_t PrepareForFillIn(); - void FillIn(MemoryRegion region); + void FillInCodeInfo(MemoryRegion region); + void FillInMethodInfo(MemoryRegion region); + + size_t ComputeMethodInfoSize() const; private: size_t ComputeDexRegisterLocationCatalogSize() const; @@ -180,6 +187,9 @@ class StackMapStream : public ValueObject { // Returns the number of unique register masks. size_t PrepareRegisterMasks(); + // Prepare and deduplicate method indices. + void PrepareMethodIndices(); + // Deduplicate entry if possible and return the corresponding index into dex_register_entries_ // array. If entry is not a duplicate, a new entry is added to dex_register_entries_. 
size_t AddDexRegisterMapEntry(const DexRegisterMapEntry& entry); @@ -232,6 +242,7 @@ class StackMapStream : public ValueObject { ArenaVector<InlineInfoEntry> inline_infos_; ArenaVector<uint8_t> stack_masks_; ArenaVector<uint32_t> register_masks_; + ArenaVector<uint32_t> method_indices_; ArenaVector<DexRegisterMapEntry> dex_register_entries_; int stack_mask_max_; uint32_t dex_pc_max_; diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc index 330f7f28b6..a842c6e452 100644 --- a/compiler/optimizing/stack_map_test.cc +++ b/compiler/optimizing/stack_map_test.cc @@ -60,7 +60,7 @@ TEST(StackMapTest, Test1) { size_t size = stream.PrepareForFillIn(); void* memory = arena.Alloc(size, kArenaAllocMisc); MemoryRegion region(memory, size); - stream.FillIn(region); + stream.FillInCodeInfo(region); CodeInfo code_info(region); CodeInfoEncoding encoding = code_info.ExtractEncoding(); @@ -173,7 +173,7 @@ TEST(StackMapTest, Test2) { size_t size = stream.PrepareForFillIn(); void* memory = arena.Alloc(size, kArenaAllocMisc); MemoryRegion region(memory, size); - stream.FillIn(region); + stream.FillInCodeInfo(region); CodeInfo code_info(region); CodeInfoEncoding encoding = code_info.ExtractEncoding(); @@ -433,7 +433,7 @@ TEST(StackMapTest, TestDeduplicateInlineInfoDexRegisterMap) { size_t size = stream.PrepareForFillIn(); void* memory = arena.Alloc(size, kArenaAllocMisc); MemoryRegion region(memory, size); - stream.FillIn(region); + stream.FillInCodeInfo(region); CodeInfo code_info(region); CodeInfoEncoding encoding = code_info.ExtractEncoding(); @@ -519,7 +519,7 @@ TEST(StackMapTest, TestNonLiveDexRegisters) { size_t size = stream.PrepareForFillIn(); void* memory = arena.Alloc(size, kArenaAllocMisc); MemoryRegion region(memory, size); - stream.FillIn(region); + stream.FillInCodeInfo(region); CodeInfo code_info(region); CodeInfoEncoding encoding = code_info.ExtractEncoding(); @@ -611,7 +611,7 @@ TEST(StackMapTest, DexRegisterMapOffsetOverflow) { 
size_t size = stream.PrepareForFillIn(); void* memory = arena.Alloc(size, kArenaAllocMisc); MemoryRegion region(memory, size); - stream.FillIn(region); + stream.FillInCodeInfo(region); CodeInfo code_info(region); CodeInfoEncoding encoding = code_info.ExtractEncoding(); @@ -672,7 +672,7 @@ TEST(StackMapTest, TestShareDexRegisterMap) { size_t size = stream.PrepareForFillIn(); void* memory = arena.Alloc(size, kArenaAllocMisc); MemoryRegion region(memory, size); - stream.FillIn(region); + stream.FillInCodeInfo(region); CodeInfo ci(region); CodeInfoEncoding encoding = ci.ExtractEncoding(); @@ -721,7 +721,7 @@ TEST(StackMapTest, TestNoDexRegisterMap) { size_t size = stream.PrepareForFillIn(); void* memory = arena.Alloc(size, kArenaAllocMisc); MemoryRegion region(memory, size); - stream.FillIn(region); + stream.FillInCodeInfo(region); CodeInfo code_info(region); CodeInfoEncoding encoding = code_info.ExtractEncoding(); @@ -823,7 +823,7 @@ TEST(StackMapTest, InlineTest) { size_t size = stream.PrepareForFillIn(); void* memory = arena.Alloc(size, kArenaAllocMisc); MemoryRegion region(memory, size); - stream.FillIn(region); + stream.FillInCodeInfo(region); CodeInfo ci(region); CodeInfoEncoding encoding = ci.ExtractEncoding(); @@ -950,7 +950,7 @@ TEST(StackMapTest, TestDeduplicateStackMask) { size_t size = stream.PrepareForFillIn(); void* memory = arena.Alloc(size, kArenaAllocMisc); MemoryRegion region(memory, size); - stream.FillIn(region); + stream.FillInCodeInfo(region); CodeInfo code_info(region); CodeInfoEncoding encoding = code_info.ExtractEncoding(); @@ -979,11 +979,16 @@ TEST(StackMapTest, TestInvokeInfo) { stream.AddInvoke(kDirect, 65535); stream.EndStackMapEntry(); - const size_t size = stream.PrepareForFillIn(); - MemoryRegion region(arena.Alloc(size, kArenaAllocMisc), size); - stream.FillIn(region); + const size_t code_info_size = stream.PrepareForFillIn(); + MemoryRegion code_info_region(arena.Alloc(code_info_size, kArenaAllocMisc), code_info_size); + 
stream.FillInCodeInfo(code_info_region); - CodeInfo code_info(region); + const size_t method_info_size = stream.ComputeMethodInfoSize(); + MemoryRegion method_info_region(arena.Alloc(method_info_size, kArenaAllocMisc), method_info_size); + stream.FillInMethodInfo(method_info_region); + + CodeInfo code_info(code_info_region); + MethodInfo method_info(method_info_region.begin()); CodeInfoEncoding encoding = code_info.ExtractEncoding(); ASSERT_EQ(3u, code_info.GetNumberOfStackMaps(encoding)); @@ -996,13 +1001,13 @@ TEST(StackMapTest, TestInvokeInfo) { EXPECT_TRUE(invoke2.IsValid()); EXPECT_TRUE(invoke3.IsValid()); EXPECT_EQ(invoke1.GetInvokeType(encoding.invoke_info.encoding), kSuper); - EXPECT_EQ(invoke1.GetMethodIndex(encoding.invoke_info.encoding), 1u); + EXPECT_EQ(invoke1.GetMethodIndex(encoding.invoke_info.encoding, method_info), 1u); EXPECT_EQ(invoke1.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 4u); EXPECT_EQ(invoke2.GetInvokeType(encoding.invoke_info.encoding), kStatic); - EXPECT_EQ(invoke2.GetMethodIndex(encoding.invoke_info.encoding), 3u); + EXPECT_EQ(invoke2.GetMethodIndex(encoding.invoke_info.encoding, method_info), 3u); EXPECT_EQ(invoke2.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 8u); EXPECT_EQ(invoke3.GetInvokeType(encoding.invoke_info.encoding), kDirect); - EXPECT_EQ(invoke3.GetMethodIndex(encoding.invoke_info.encoding), 65535u); + EXPECT_EQ(invoke3.GetMethodIndex(encoding.invoke_info.encoding, method_info), 65535u); EXPECT_EQ(invoke3.GetNativePcOffset(encoding.invoke_info.encoding, kRuntimeISA), 16u); } diff --git a/compiler/utils/arm/assembler_arm_vixl.cc b/compiler/utils/arm/assembler_arm_vixl.cc index e5eef37b7b..6afc3ddecb 100644 --- a/compiler/utils/arm/assembler_arm_vixl.cc +++ b/compiler/utils/arm/assembler_arm_vixl.cc @@ -230,6 +230,7 @@ void ArmVIXLAssembler::StoreToOffset(StoreOperandType type, if (!CanHoldStoreOffsetThumb(type, offset)) { CHECK_NE(base.GetCode(), kIpCode); if ((reg.GetCode() != kIpCode) 
&& + (!vixl_masm_.GetScratchRegisterList()->IsEmpty()) && ((type != kStoreWordPair) || (reg.GetCode() + 1 != kIpCode))) { tmp_reg = temps.Acquire(); } else { diff --git a/compiler/utils/assembler_test.h b/compiler/utils/assembler_test.h index 5c4875951b..d265a44092 100644 --- a/compiler/utils/assembler_test.h +++ b/compiler/utils/assembler_test.h @@ -42,7 +42,10 @@ enum class RegisterView { // private kUseQuaternaryName, }; -template<typename Ass, typename Reg, typename FPReg, typename Imm> +// For use in the template as the default type to get a nonvector registers version. +struct NoVectorRegs {}; + +template<typename Ass, typename Reg, typename FPReg, typename Imm, typename VecReg = NoVectorRegs> class AssemblerTest : public testing::Test { public: Ass* GetAssembler() { @@ -146,7 +149,8 @@ class AssemblerTest : public testing::Test { std::string (AssemblerTest::*GetName1)(const Reg1&), std::string (AssemblerTest::*GetName2)(const Reg2&), const std::string& fmt, - int bias = 0) { + int bias = 0, + int multiplier = 1) { std::string str; std::vector<int64_t> imms = CreateImmediateValuesBits(abs(imm_bits), (imm_bits > 0)); @@ -154,7 +158,7 @@ class AssemblerTest : public testing::Test { for (auto reg2 : reg2_registers) { for (int64_t imm : imms) { ImmType new_imm = CreateImmediate(imm); - (assembler_.get()->*f)(*reg1, *reg2, new_imm + bias); + (assembler_.get()->*f)(*reg1, *reg2, new_imm * multiplier + bias); std::string base = fmt; std::string reg1_string = (this->*GetName1)(*reg1); @@ -172,7 +176,7 @@ class AssemblerTest : public testing::Test { size_t imm_index = base.find(IMM_TOKEN); if (imm_index != std::string::npos) { std::ostringstream sreg; - sreg << imm + bias; + sreg << imm * multiplier + bias; std::string imm_string = sreg.str(); base.replace(imm_index, ConstexprStrLen(IMM_TOKEN), imm_string); } @@ -538,6 +542,69 @@ class AssemblerTest : public testing::Test { return str; } + std::string RepeatVV(void (Ass::*f)(VecReg, VecReg), const std::string& fmt) { 
+ return RepeatTemplatedRegisters<VecReg, VecReg>(f, + GetVectorRegisters(), + GetVectorRegisters(), + &AssemblerTest::GetVecRegName, + &AssemblerTest::GetVecRegName, + fmt); + } + + std::string RepeatVVV(void (Ass::*f)(VecReg, VecReg, VecReg), const std::string& fmt) { + return RepeatTemplatedRegisters<VecReg, VecReg, VecReg>(f, + GetVectorRegisters(), + GetVectorRegisters(), + GetVectorRegisters(), + &AssemblerTest::GetVecRegName, + &AssemblerTest::GetVecRegName, + &AssemblerTest::GetVecRegName, + fmt); + } + + std::string RepeatVR(void (Ass::*f)(VecReg, Reg), const std::string& fmt) { + return RepeatTemplatedRegisters<VecReg, Reg>( + f, + GetVectorRegisters(), + GetRegisters(), + &AssemblerTest::GetVecRegName, + &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>, + fmt); + } + + template <typename ImmType> + std::string RepeatVRIb(void (Ass::*f)(VecReg, Reg, ImmType), + int imm_bits, + const std::string& fmt, + int bias = 0, + int multiplier = 1) { + return RepeatTemplatedRegistersImmBits<VecReg, Reg, ImmType>( + f, + imm_bits, + GetVectorRegisters(), + GetRegisters(), + &AssemblerTest::GetVecRegName, + &AssemblerTest::GetRegName<RegisterView::kUsePrimaryName>, + fmt, + bias, + multiplier); + } + + template <typename ImmType> + std::string RepeatVVIb(void (Ass::*f)(VecReg, VecReg, ImmType), + int imm_bits, + const std::string& fmt, + int bias = 0) { + return RepeatTemplatedRegistersImmBits<VecReg, VecReg, ImmType>(f, + imm_bits, + GetVectorRegisters(), + GetVectorRegisters(), + &AssemblerTest::GetVecRegName, + &AssemblerTest::GetVecRegName, + fmt, + bias); + } + // This is intended to be run as a test. 
bool CheckTools() { return test_helper_->CheckTools(); @@ -552,6 +619,11 @@ class AssemblerTest : public testing::Test { UNREACHABLE(); } + virtual std::vector<VecReg*> GetVectorRegisters() { + UNIMPLEMENTED(FATAL) << "Architecture does not support vector registers"; + UNREACHABLE(); + } + // Secondary register names are the secondary view on registers, e.g., 32b on 64b systems. virtual std::string GetSecondaryRegisterName(const Reg& reg ATTRIBUTE_UNUSED) { UNIMPLEMENTED(FATAL) << "Architecture does not support secondary registers"; @@ -971,6 +1043,12 @@ class AssemblerTest : public testing::Test { return sreg.str(); } + std::string GetVecRegName(const VecReg& reg) { + std::ostringstream sreg; + sreg << reg; + return sreg.str(); + } + // If the assembly file needs a header, return it in a sub-class. virtual const char* GetAssemblyHeader() { return nullptr; diff --git a/compiler/utils/mips64/assembler_mips64.cc b/compiler/utils/mips64/assembler_mips64.cc index 39eb5893d8..4e7f635246 100644 --- a/compiler/utils/mips64/assembler_mips64.cc +++ b/compiler/utils/mips64/assembler_mips64.cc @@ -184,6 +184,106 @@ void Mips64Assembler::EmitFI(int opcode, int fmt, FpuRegister ft, uint16_t imm) Emit(encoding); } +void Mips64Assembler::EmitMsa3R(int operation, + int df, + VectorRegister wt, + VectorRegister ws, + VectorRegister wd, + int minor_opcode) { + CHECK_NE(wt, kNoVectorRegister); + CHECK_NE(ws, kNoVectorRegister); + CHECK_NE(wd, kNoVectorRegister); + uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift | + operation << kMsaOperationShift | + df << kDfShift | + static_cast<uint32_t>(wt) << kWtShift | + static_cast<uint32_t>(ws) << kWsShift | + static_cast<uint32_t>(wd) << kWdShift | + minor_opcode; + Emit(encoding); +} + +void Mips64Assembler::EmitMsaBIT(int operation, + int df_m, + VectorRegister ws, + VectorRegister wd, + int minor_opcode) { + CHECK_NE(ws, kNoVectorRegister); + CHECK_NE(wd, kNoVectorRegister); + uint32_t encoding = 
static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift | + operation << kMsaOperationShift | + df_m << kDfMShift | + static_cast<uint32_t>(ws) << kWsShift | + static_cast<uint32_t>(wd) << kWdShift | + minor_opcode; + Emit(encoding); +} + +void Mips64Assembler::EmitMsaELM(int operation, + int df_n, + VectorRegister ws, + VectorRegister wd, + int minor_opcode) { + CHECK_NE(ws, kNoVectorRegister); + CHECK_NE(wd, kNoVectorRegister); + uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift | + operation << kMsaELMOperationShift | + df_n << kDfNShift | + static_cast<uint32_t>(ws) << kWsShift | + static_cast<uint32_t>(wd) << kWdShift | + minor_opcode; + Emit(encoding); +} + +void Mips64Assembler::EmitMsaMI10(int s10, + GpuRegister rs, + VectorRegister wd, + int minor_opcode, + int df) { + CHECK_NE(rs, kNoGpuRegister); + CHECK_NE(wd, kNoVectorRegister); + CHECK(IsUint<10>(s10)) << s10; + uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift | + s10 << kS10Shift | + static_cast<uint32_t>(rs) << kWsShift | + static_cast<uint32_t>(wd) << kWdShift | + minor_opcode << kS10MinorShift | + df; + Emit(encoding); +} + +void Mips64Assembler::EmitMsa2R(int operation, + int df, + VectorRegister ws, + VectorRegister wd, + int minor_opcode) { + CHECK_NE(ws, kNoVectorRegister); + CHECK_NE(wd, kNoVectorRegister); + uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift | + operation << kMsa2ROperationShift | + df << kDf2RShift | + static_cast<uint32_t>(ws) << kWsShift | + static_cast<uint32_t>(wd) << kWdShift | + minor_opcode; + Emit(encoding); +} + +void Mips64Assembler::EmitMsa2RF(int operation, + int df, + VectorRegister ws, + VectorRegister wd, + int minor_opcode) { + CHECK_NE(ws, kNoVectorRegister); + CHECK_NE(wd, kNoVectorRegister); + uint32_t encoding = static_cast<uint32_t>(kMsaMajorOpcode) << kOpcodeShift | + operation << kMsa2RFOperationShift | + df << kDf2RShift | + static_cast<uint32_t>(ws) << kWsShift | + 
static_cast<uint32_t>(wd) << kWdShift | + minor_opcode; + Emit(encoding); +} + void Mips64Assembler::Addu(GpuRegister rd, GpuRegister rs, GpuRegister rt) { EmitR(0, rs, rt, rd, 0, 0x21); } @@ -1080,6 +1180,378 @@ void Mips64Assembler::Not(GpuRegister rd, GpuRegister rs) { Nor(rd, rs, ZERO); } +// TODO: Check for MSA presence in Mips64InstructionSetFeatures for each MSA instruction. + +void Mips64Assembler::AndV(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1e); +} + +void Mips64Assembler::OrV(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x1e); +} + +void Mips64Assembler::NorV(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x1e); +} + +void Mips64Assembler::XorV(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x1e); +} + +void Mips64Assembler::AddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x0, wt, ws, wd, 0xe); +} + +void Mips64Assembler::AddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x1, wt, ws, wd, 0xe); +} + +void Mips64Assembler::AddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x2, wt, ws, wd, 0xe); +} + +void Mips64Assembler::AddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x3, wt, ws, wd, 0xe); +} + +void Mips64Assembler::SubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x1, 0x0, wt, ws, wd, 0xe); +} + +void Mips64Assembler::SubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x1, 0x1, wt, ws, wd, 0xe); +} + +void Mips64Assembler::SubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x1, 0x2, wt, ws, wd, 0xe); +} + +void Mips64Assembler::SubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x1, 0x3, wt, ws, wd, 0xe); +} + +void 
Mips64Assembler::MulvB(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x12); +} + +void Mips64Assembler::MulvH(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x12); +} + +void Mips64Assembler::MulvW(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x12); +} + +void Mips64Assembler::MulvD(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x12); +} + +void Mips64Assembler::Div_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x4, 0x0, wt, ws, wd, 0x12); +} + +void Mips64Assembler::Div_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x4, 0x1, wt, ws, wd, 0x12); +} + +void Mips64Assembler::Div_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x4, 0x2, wt, ws, wd, 0x12); +} + +void Mips64Assembler::Div_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x4, 0x3, wt, ws, wd, 0x12); +} + +void Mips64Assembler::Div_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x5, 0x0, wt, ws, wd, 0x12); +} + +void Mips64Assembler::Div_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x5, 0x1, wt, ws, wd, 0x12); +} + +void Mips64Assembler::Div_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x5, 0x2, wt, ws, wd, 0x12); +} + +void Mips64Assembler::Div_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x5, 0x3, wt, ws, wd, 0x12); +} + +void Mips64Assembler::Mod_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x6, 0x0, wt, ws, wd, 0x12); +} + +void Mips64Assembler::Mod_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x6, 0x1, wt, ws, wd, 0x12); +} + +void Mips64Assembler::Mod_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x6, 0x2, wt, ws, 
wd, 0x12); +} + +void Mips64Assembler::Mod_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x6, 0x3, wt, ws, wd, 0x12); +} + +void Mips64Assembler::Mod_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x7, 0x0, wt, ws, wd, 0x12); +} + +void Mips64Assembler::Mod_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x7, 0x1, wt, ws, wd, 0x12); +} + +void Mips64Assembler::Mod_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x7, 0x2, wt, ws, wd, 0x12); +} + +void Mips64Assembler::Mod_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x7, 0x3, wt, ws, wd, 0x12); +} + +void Mips64Assembler::FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x0, wt, ws, wd, 0x1b); +} + +void Mips64Assembler::FaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x1, wt, ws, wd, 0x1b); +} + +void Mips64Assembler::FsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x2, wt, ws, wd, 0x1b); +} + +void Mips64Assembler::FsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x3, wt, ws, wd, 0x1b); +} + +void Mips64Assembler::FmulW(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x1, 0x0, wt, ws, wd, 0x1b); +} + +void Mips64Assembler::FmulD(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x1, 0x1, wt, ws, wd, 0x1b); +} + +void Mips64Assembler::FdivW(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x1, 0x2, wt, ws, wd, 0x1b); +} + +void Mips64Assembler::FdivD(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x1, 0x3, wt, ws, wd, 0x1b); +} + +void Mips64Assembler::Ffint_sW(VectorRegister wd, VectorRegister ws) { + EmitMsa2RF(0x19e, 0x0, ws, wd, 0x1e); +} + +void Mips64Assembler::Ffint_sD(VectorRegister wd, VectorRegister ws) { + EmitMsa2RF(0x19e, 0x1, ws, wd, 0x1e); +} + 
+void Mips64Assembler::Ftint_sW(VectorRegister wd, VectorRegister ws) { + EmitMsa2RF(0x19c, 0x0, ws, wd, 0x1e); +} + +void Mips64Assembler::Ftint_sD(VectorRegister wd, VectorRegister ws) { + EmitMsa2RF(0x19c, 0x1, ws, wd, 0x1e); +} + +void Mips64Assembler::SllB(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x0, wt, ws, wd, 0xd); +} + +void Mips64Assembler::SllH(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x1, wt, ws, wd, 0xd); +} + +void Mips64Assembler::SllW(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x2, wt, ws, wd, 0xd); +} + +void Mips64Assembler::SllD(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x0, 0x3, wt, ws, wd, 0xd); +} + +void Mips64Assembler::SraB(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x1, 0x0, wt, ws, wd, 0xd); +} + +void Mips64Assembler::SraH(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x1, 0x1, wt, ws, wd, 0xd); +} + +void Mips64Assembler::SraW(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x1, 0x2, wt, ws, wd, 0xd); +} + +void Mips64Assembler::SraD(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x1, 0x3, wt, ws, wd, 0xd); +} + +void Mips64Assembler::SrlB(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x2, 0x0, wt, ws, wd, 0xd); +} + +void Mips64Assembler::SrlH(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x2, 0x1, wt, ws, wd, 0xd); +} + +void Mips64Assembler::SrlW(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x2, 0x2, wt, ws, wd, 0xd); +} + +void Mips64Assembler::SrlD(VectorRegister wd, VectorRegister ws, VectorRegister wt) { + EmitMsa3R(0x2, 0x3, wt, ws, wd, 0xd); +} + +void Mips64Assembler::SlliB(VectorRegister wd, VectorRegister ws, int shamt3) { + CHECK(IsUint<3>(shamt3)) << shamt3; + EmitMsaBIT(0x0, shamt3 | kMsaDfMByteMask, ws, wd, 0x9); +} + 
+void Mips64Assembler::SlliH(VectorRegister wd, VectorRegister ws, int shamt4) { + CHECK(IsUint<4>(shamt4)) << shamt4; + EmitMsaBIT(0x0, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9); +} + +void Mips64Assembler::SlliW(VectorRegister wd, VectorRegister ws, int shamt5) { + CHECK(IsUint<5>(shamt5)) << shamt5; + EmitMsaBIT(0x0, shamt5 | kMsaDfMWordMask, ws, wd, 0x9); +} + +void Mips64Assembler::SlliD(VectorRegister wd, VectorRegister ws, int shamt6) { + CHECK(IsUint<6>(shamt6)) << shamt6; + EmitMsaBIT(0x0, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9); +} + +void Mips64Assembler::SraiB(VectorRegister wd, VectorRegister ws, int shamt3) { + CHECK(IsUint<3>(shamt3)) << shamt3; + EmitMsaBIT(0x1, shamt3 | kMsaDfMByteMask, ws, wd, 0x9); +} + +void Mips64Assembler::SraiH(VectorRegister wd, VectorRegister ws, int shamt4) { + CHECK(IsUint<4>(shamt4)) << shamt4; + EmitMsaBIT(0x1, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9); +} + +void Mips64Assembler::SraiW(VectorRegister wd, VectorRegister ws, int shamt5) { + CHECK(IsUint<5>(shamt5)) << shamt5; + EmitMsaBIT(0x1, shamt5 | kMsaDfMWordMask, ws, wd, 0x9); +} + +void Mips64Assembler::SraiD(VectorRegister wd, VectorRegister ws, int shamt6) { + CHECK(IsUint<6>(shamt6)) << shamt6; + EmitMsaBIT(0x1, shamt6 | kMsaDfMDoublewordMask, ws, wd, 0x9); +} + +void Mips64Assembler::SrliB(VectorRegister wd, VectorRegister ws, int shamt3) { + CHECK(IsUint<3>(shamt3)) << shamt3; + EmitMsaBIT(0x2, shamt3 | kMsaDfMByteMask, ws, wd, 0x9); +} + +void Mips64Assembler::SrliH(VectorRegister wd, VectorRegister ws, int shamt4) { + CHECK(IsUint<4>(shamt4)) << shamt4; + EmitMsaBIT(0x2, shamt4 | kMsaDfMHalfwordMask, ws, wd, 0x9); +} + +void Mips64Assembler::SrliW(VectorRegister wd, VectorRegister ws, int shamt5) { + CHECK(IsUint<5>(shamt5)) << shamt5; + EmitMsaBIT(0x2, shamt5 | kMsaDfMWordMask, ws, wd, 0x9); +} + +void Mips64Assembler::SrliD(VectorRegister wd, VectorRegister ws, int shamt6) { + CHECK(IsUint<6>(shamt6)) << shamt6; + EmitMsaBIT(0x2, shamt6 | 
kMsaDfMDoublewordMask, ws, wd, 0x9); +} + +void Mips64Assembler::MoveV(VectorRegister wd, VectorRegister ws) { + EmitMsaBIT(0x1, 0x3e, ws, wd, 0x19); +} + +void Mips64Assembler::SplatiB(VectorRegister wd, VectorRegister ws, int n4) { + CHECK(IsUint<4>(n4)) << n4; + EmitMsaELM(0x1, n4 | kMsaDfNByteMask, ws, wd, 0x19); +} + +void Mips64Assembler::SplatiH(VectorRegister wd, VectorRegister ws, int n3) { + CHECK(IsUint<3>(n3)) << n3; + EmitMsaELM(0x1, n3 | kMsaDfNHalfwordMask, ws, wd, 0x19); +} + +void Mips64Assembler::SplatiW(VectorRegister wd, VectorRegister ws, int n2) { + CHECK(IsUint<2>(n2)) << n2; + EmitMsaELM(0x1, n2 | kMsaDfNWordMask, ws, wd, 0x19); +} + +void Mips64Assembler::SplatiD(VectorRegister wd, VectorRegister ws, int n1) { + CHECK(IsUint<1>(n1)) << n1; + EmitMsaELM(0x1, n1 | kMsaDfNDoublewordMask, ws, wd, 0x19); +} + +void Mips64Assembler::FillB(VectorRegister wd, GpuRegister rs) { + EmitMsa2R(0xc0, 0x0, static_cast<VectorRegister>(rs), wd, 0x1e); +} + +void Mips64Assembler::FillH(VectorRegister wd, GpuRegister rs) { + EmitMsa2R(0xc0, 0x1, static_cast<VectorRegister>(rs), wd, 0x1e); +} + +void Mips64Assembler::FillW(VectorRegister wd, GpuRegister rs) { + EmitMsa2R(0xc0, 0x2, static_cast<VectorRegister>(rs), wd, 0x1e); +} + +void Mips64Assembler::FillD(VectorRegister wd, GpuRegister rs) { + EmitMsa2R(0xc0, 0x3, static_cast<VectorRegister>(rs), wd, 0x1e); +} + +void Mips64Assembler::LdB(VectorRegister wd, GpuRegister rs, int offset) { + CHECK(IsInt<10>(offset)) << offset; + EmitMsaMI10(offset & kMsaS10Mask, rs, wd, 0x8, 0x0); +} + +void Mips64Assembler::LdH(VectorRegister wd, GpuRegister rs, int offset) { + CHECK(IsInt<11>(offset)) << offset; + CHECK_ALIGNED(offset, kMips64HalfwordSize); + EmitMsaMI10((offset >> TIMES_2) & kMsaS10Mask, rs, wd, 0x8, 0x1); +} + +void Mips64Assembler::LdW(VectorRegister wd, GpuRegister rs, int offset) { + CHECK(IsInt<12>(offset)) << offset; + CHECK_ALIGNED(offset, kMips64WordSize); + EmitMsaMI10((offset >> TIMES_4) & 
kMsaS10Mask, rs, wd, 0x8, 0x2); +} + +void Mips64Assembler::LdD(VectorRegister wd, GpuRegister rs, int offset) { + CHECK(IsInt<13>(offset)) << offset; + CHECK_ALIGNED(offset, kMips64DoublewordSize); + EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x8, 0x3); +} + +void Mips64Assembler::StB(VectorRegister wd, GpuRegister rs, int offset) { + CHECK(IsInt<10>(offset)) << offset; + EmitMsaMI10(offset & kMsaS10Mask, rs, wd, 0x9, 0x0); +} + +void Mips64Assembler::StH(VectorRegister wd, GpuRegister rs, int offset) { + CHECK(IsInt<11>(offset)) << offset; + CHECK_ALIGNED(offset, kMips64HalfwordSize); + EmitMsaMI10((offset >> TIMES_2) & kMsaS10Mask, rs, wd, 0x9, 0x1); +} + +void Mips64Assembler::StW(VectorRegister wd, GpuRegister rs, int offset) { + CHECK(IsInt<12>(offset)) << offset; + CHECK_ALIGNED(offset, kMips64WordSize); + EmitMsaMI10((offset >> TIMES_4) & kMsaS10Mask, rs, wd, 0x9, 0x2); +} + +void Mips64Assembler::StD(VectorRegister wd, GpuRegister rs, int offset) { + CHECK(IsInt<13>(offset)) << offset; + CHECK_ALIGNED(offset, kMips64DoublewordSize); + EmitMsaMI10((offset >> TIMES_8) & kMsaS10Mask, rs, wd, 0x9, 0x3); +} + void Mips64Assembler::LoadConst32(GpuRegister rd, int32_t value) { TemplateLoadConst32(this, rd, value); } @@ -2020,6 +2492,91 @@ void Mips64Assembler::Bc1nez(FpuRegister ft, Mips64Label* label) { Bcond(label, kCondT, static_cast<GpuRegister>(ft), ZERO); } +void Mips64Assembler::AdjustBaseAndOffset(GpuRegister& base, + int32_t& offset, + bool is_doubleword) { + // This method is used to adjust the base register and offset pair + // for a load/store when the offset doesn't fit into int16_t. + // It is assumed that `base + offset` is sufficiently aligned for memory + // operands that are machine word in size or smaller. For doubleword-sized + // operands it's assumed that `base` is a multiple of 8, while `offset` + // may be a multiple of 4 (e.g. 
4-byte-aligned long and double arguments + // and spilled variables on the stack accessed relative to the stack + // pointer register). + // We preserve the "alignment" of `offset` by adjusting it by a multiple of 8. + CHECK_NE(base, AT); // Must not overwrite the register `base` while loading `offset`. + + bool doubleword_aligned = IsAligned<kMips64DoublewordSize>(offset); + bool two_accesses = is_doubleword && !doubleword_aligned; + + // IsInt<16> must be passed a signed value, hence the static cast below. + if (IsInt<16>(offset) && + (!two_accesses || IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) { + // Nothing to do: `offset` (and, if needed, `offset + 4`) fits into int16_t. + return; + } + + // Remember the "(mis)alignment" of `offset`, it will be checked at the end. + uint32_t misalignment = offset & (kMips64DoublewordSize - 1); + + // First, see if `offset` can be represented as a sum of two 16-bit signed + // offsets. This can save an instruction. + // To simplify matters, only do this for a symmetric range of offsets from + // about -64KB to about +64KB, allowing further addition of 4 when accessing + // 64-bit variables with two 32-bit accesses. + constexpr int32_t kMinOffsetForSimpleAdjustment = 0x7ff8; // Max int16_t that's a multiple of 8. + constexpr int32_t kMaxOffsetForSimpleAdjustment = 2 * kMinOffsetForSimpleAdjustment; + + if (0 <= offset && offset <= kMaxOffsetForSimpleAdjustment) { + Daddiu(AT, base, kMinOffsetForSimpleAdjustment); + offset -= kMinOffsetForSimpleAdjustment; + } else if (-kMaxOffsetForSimpleAdjustment <= offset && offset < 0) { + Daddiu(AT, base, -kMinOffsetForSimpleAdjustment); + offset += kMinOffsetForSimpleAdjustment; + } else { + // In more complex cases take advantage of the daui instruction, e.g.: + // daui AT, base, offset_high + // [dahi AT, 1] // When `offset` is close to +2GB. + // lw reg_lo, offset_low(AT) + // [lw reg_hi, (offset_low+4)(AT)] // If misaligned 64-bit load. 
+ // or when offset_low+4 overflows int16_t: + // daui AT, base, offset_high + // daddiu AT, AT, 8 + // lw reg_lo, (offset_low-8)(AT) + // lw reg_hi, (offset_low-4)(AT) + int16_t offset_low = Low16Bits(offset); + int32_t offset_low32 = offset_low; + int16_t offset_high = High16Bits(offset); + bool increment_hi16 = offset_low < 0; + bool overflow_hi16 = false; + + if (increment_hi16) { + offset_high++; + overflow_hi16 = (offset_high == -32768); + } + Daui(AT, base, offset_high); + + if (overflow_hi16) { + Dahi(AT, 1); + } + + if (two_accesses && !IsInt<16>(static_cast<int32_t>(offset_low32 + kMips64WordSize))) { + // Avoid overflow in the 16-bit offset of the load/store instruction when adding 4. + Daddiu(AT, AT, kMips64DoublewordSize); + offset_low32 -= kMips64DoublewordSize; + } + + offset = offset_low32; + } + base = AT; + + CHECK(IsInt<16>(offset)); + if (two_accesses) { + CHECK(IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize))); + } + CHECK_EQ(misalignment, offset & (kMips64DoublewordSize - 1)); +} + void Mips64Assembler::LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base, diff --git a/compiler/utils/mips64/assembler_mips64.h b/compiler/utils/mips64/assembler_mips64.h index 8bbe862d19..f42c1626df 100644 --- a/compiler/utils/mips64/assembler_mips64.h +++ b/compiler/utils/mips64/assembler_mips64.h @@ -266,6 +266,7 @@ void TemplateLoadConst64(Asm* a, Rtype rd, Vtype value) { } } +static constexpr size_t kMips64HalfwordSize = 2; static constexpr size_t kMips64WordSize = 4; static constexpr size_t kMips64DoublewordSize = 8; @@ -644,6 +645,101 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer void Clear(GpuRegister rd); void Not(GpuRegister rd, GpuRegister rs); + // MSA instructions. 
+ void AndV(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void OrV(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void NorV(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void XorV(VectorRegister wd, VectorRegister ws, VectorRegister wt); + + void AddvB(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void AddvH(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void AddvW(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void AddvD(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void SubvB(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void SubvH(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void SubvW(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void SubvD(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void MulvB(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void MulvH(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void MulvW(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void MulvD(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void Div_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void Div_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void Div_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void Div_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void Div_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void Div_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void Div_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void Div_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void Mod_sB(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void Mod_sH(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void Mod_sW(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void Mod_sD(VectorRegister wd, VectorRegister ws, VectorRegister wt); + 
void Mod_uB(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void Mod_uH(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void Mod_uW(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void Mod_uD(VectorRegister wd, VectorRegister ws, VectorRegister wt); + + void FaddW(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void FaddD(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void FsubW(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void FsubD(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void FmulW(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void FmulD(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void FdivW(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void FdivD(VectorRegister wd, VectorRegister ws, VectorRegister wt); + + void Ffint_sW(VectorRegister wd, VectorRegister ws); + void Ffint_sD(VectorRegister wd, VectorRegister ws); + void Ftint_sW(VectorRegister wd, VectorRegister ws); + void Ftint_sD(VectorRegister wd, VectorRegister ws); + + void SllB(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void SllH(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void SllW(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void SllD(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void SraB(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void SraH(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void SraW(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void SraD(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void SrlB(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void SrlH(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void SrlW(VectorRegister wd, VectorRegister ws, VectorRegister wt); + void SrlD(VectorRegister wd, VectorRegister ws, VectorRegister wt); + + // Immediate shift instructions, where shamtN denotes shift amount (must be 
between 0 and 2^N-1). + void SlliB(VectorRegister wd, VectorRegister ws, int shamt3); + void SlliH(VectorRegister wd, VectorRegister ws, int shamt4); + void SlliW(VectorRegister wd, VectorRegister ws, int shamt5); + void SlliD(VectorRegister wd, VectorRegister ws, int shamt6); + void SraiB(VectorRegister wd, VectorRegister ws, int shamt3); + void SraiH(VectorRegister wd, VectorRegister ws, int shamt4); + void SraiW(VectorRegister wd, VectorRegister ws, int shamt5); + void SraiD(VectorRegister wd, VectorRegister ws, int shamt6); + void SrliB(VectorRegister wd, VectorRegister ws, int shamt3); + void SrliH(VectorRegister wd, VectorRegister ws, int shamt4); + void SrliW(VectorRegister wd, VectorRegister ws, int shamt5); + void SrliD(VectorRegister wd, VectorRegister ws, int shamt6); + + void MoveV(VectorRegister wd, VectorRegister ws); + void SplatiB(VectorRegister wd, VectorRegister ws, int n4); + void SplatiH(VectorRegister wd, VectorRegister ws, int n3); + void SplatiW(VectorRegister wd, VectorRegister ws, int n2); + void SplatiD(VectorRegister wd, VectorRegister ws, int n1); + void FillB(VectorRegister wd, GpuRegister rs); + void FillH(VectorRegister wd, GpuRegister rs); + void FillW(VectorRegister wd, GpuRegister rs); + void FillD(VectorRegister wd, GpuRegister rs); + + void LdB(VectorRegister wd, GpuRegister rs, int offset); + void LdH(VectorRegister wd, GpuRegister rs, int offset); + void LdW(VectorRegister wd, GpuRegister rs, int offset); + void LdD(VectorRegister wd, GpuRegister rs, int offset); + void StB(VectorRegister wd, GpuRegister rs, int offset); + void StH(VectorRegister wd, GpuRegister rs, int offset); + void StW(VectorRegister wd, GpuRegister rs, int offset); + void StD(VectorRegister wd, GpuRegister rs, int offset); + // Higher level composite instructions. 
int InstrCountForLoadReplicatedConst32(int64_t); void LoadConst32(GpuRegister rd, int32_t value); @@ -772,6 +868,7 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer void Bc1nez(FpuRegister ft, Mips64Label* label); void EmitLoad(ManagedRegister m_dst, GpuRegister src_register, int32_t src_offset, size_t size); + void AdjustBaseAndOffset(GpuRegister& base, int32_t& offset, bool is_doubleword); private: // This will be used as an argument for loads/stores @@ -782,19 +879,85 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer public: template <typename ImplicitNullChecker = NoImplicitNullChecker> + void StoreConstToOffset(StoreOperandType type, + int64_t value, + GpuRegister base, + int32_t offset, + GpuRegister temp, + ImplicitNullChecker null_checker = NoImplicitNullChecker()) { + // We permit `base` and `temp` to coincide (however, we check that neither is AT), + // in which case the `base` register may be overwritten in the process. + CHECK_NE(temp, AT); // Must not use AT as temp, so as not to overwrite the adjusted base. + AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword)); + GpuRegister reg; + // If the adjustment left `base` unchanged and equal to `temp`, we can't use `temp` + // to load and hold the value but we can use AT instead as AT hasn't been used yet. + // Otherwise, `temp` can be used for the value. And if `temp` is the same as the + // original `base` (that is, `base` prior to the adjustment), the original `base` + // register will be overwritten. 
+ if (base == temp) { + temp = AT; + } + + if (type == kStoreDoubleword && IsAligned<kMips64DoublewordSize>(offset)) { + if (value == 0) { + reg = ZERO; + } else { + reg = temp; + LoadConst64(reg, value); + } + Sd(reg, base, offset); + null_checker(); + } else { + uint32_t low = Low32Bits(value); + uint32_t high = High32Bits(value); + if (low == 0) { + reg = ZERO; + } else { + reg = temp; + LoadConst32(reg, low); + } + switch (type) { + case kStoreByte: + Sb(reg, base, offset); + break; + case kStoreHalfword: + Sh(reg, base, offset); + break; + case kStoreWord: + Sw(reg, base, offset); + break; + case kStoreDoubleword: + // not aligned to kMips64DoublewordSize + CHECK_ALIGNED(offset, kMips64WordSize); + Sw(reg, base, offset); + null_checker(); + if (high == 0) { + reg = ZERO; + } else { + reg = temp; + if (high != low) { + LoadConst32(reg, high); + } + } + Sw(reg, base, offset + kMips64WordSize); + break; + default: + LOG(FATAL) << "UNREACHABLE"; + } + if (type != kStoreDoubleword) { + null_checker(); + } + } + } + + template <typename ImplicitNullChecker = NoImplicitNullChecker> void LoadFromOffset(LoadOperandType type, GpuRegister reg, GpuRegister base, int32_t offset, ImplicitNullChecker null_checker = NoImplicitNullChecker()) { - if (!IsInt<16>(offset) || - (type == kLoadDoubleword && !IsAligned<kMips64DoublewordSize>(offset) && - !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) { - LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1)); - Daddu(AT, AT, base); - base = AT; - offset &= (kMips64DoublewordSize - 1); - } + AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword)); switch (type) { case kLoadSignedByte: @@ -841,14 +1004,7 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer GpuRegister base, int32_t offset, ImplicitNullChecker null_checker = NoImplicitNullChecker()) { - if (!IsInt<16>(offset) || - (type == kLoadDoubleword && !IsAligned<kMips64DoublewordSize>(offset) && - 
!IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) { - LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1)); - Daddu(AT, AT, base); - base = AT; - offset &= (kMips64DoublewordSize - 1); - } + AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kLoadDoubleword)); switch (type) { case kLoadWord: @@ -879,14 +1035,10 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer GpuRegister base, int32_t offset, ImplicitNullChecker null_checker = NoImplicitNullChecker()) { - if (!IsInt<16>(offset) || - (type == kStoreDoubleword && !IsAligned<kMips64DoublewordSize>(offset) && - !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) { - LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1)); - Daddu(AT, AT, base); - base = AT; - offset &= (kMips64DoublewordSize - 1); - } + // Must not use AT as `reg`, so as not to overwrite the value being stored + // with the adjusted `base`. + CHECK_NE(reg, AT); + AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword)); switch (type) { case kStoreByte: @@ -925,14 +1077,7 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer GpuRegister base, int32_t offset, ImplicitNullChecker null_checker = NoImplicitNullChecker()) { - if (!IsInt<16>(offset) || - (type == kStoreDoubleword && !IsAligned<kMips64DoublewordSize>(offset) && - !IsInt<16>(static_cast<int32_t>(offset + kMips64WordSize)))) { - LoadConst32(AT, offset & ~(kMips64DoublewordSize - 1)); - Daddu(AT, AT, base); - base = AT; - offset &= (kMips64DoublewordSize - 1); - } + AdjustBaseAndOffset(base, offset, /* is_doubleword */ (type == kStoreDoubleword)); switch (type) { case kStoreWord: @@ -1300,6 +1445,17 @@ class Mips64Assembler FINAL : public Assembler, public JNIMacroAssembler<Pointer void EmitFR(int opcode, int fmt, FpuRegister ft, FpuRegister fs, FpuRegister fd, int funct); void EmitFI(int opcode, int fmt, FpuRegister rt, uint16_t imm); void EmitBcondc(BranchCondition cond, 
GpuRegister rs, GpuRegister rt, uint32_t imm16_21); + void EmitMsa3R(int operation, + int df, + VectorRegister wt, + VectorRegister ws, + VectorRegister wd, + int minor_opcode); + void EmitMsaBIT(int operation, int df_m, VectorRegister ws, VectorRegister wd, int minor_opcode); + void EmitMsaELM(int operation, int df_n, VectorRegister ws, VectorRegister wd, int minor_opcode); + void EmitMsaMI10(int s10, GpuRegister rs, VectorRegister wd, int minor_opcode, int df); + void EmitMsa2R(int operation, int df, VectorRegister ws, VectorRegister wd, int minor_opcode); + void EmitMsa2RF(int operation, int df, VectorRegister ws, VectorRegister wd, int minor_opcode); void Buncond(Mips64Label* label); void Bcond(Mips64Label* label, diff --git a/compiler/utils/mips64/assembler_mips64_test.cc b/compiler/utils/mips64/assembler_mips64_test.cc index 96a02c46d7..12660ce85d 100644 --- a/compiler/utils/mips64/assembler_mips64_test.cc +++ b/compiler/utils/mips64/assembler_mips64_test.cc @@ -37,12 +37,14 @@ struct MIPS64CpuRegisterCompare { class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler, mips64::GpuRegister, mips64::FpuRegister, - uint32_t> { + uint32_t, + mips64::VectorRegister> { public: typedef AssemblerTest<mips64::Mips64Assembler, mips64::GpuRegister, mips64::FpuRegister, - uint32_t> Base; + uint32_t, + mips64::VectorRegister> Base; protected: // Get the typically used name for this architecture, e.g., aarch64, x86-64, ... @@ -60,7 +62,7 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler, // (and MIPS32R6) with the GNU assembler don't have correct final offsets in PC-relative // branches in the .text section and so they require a relocation pass (there's a relocation // section, .rela.text, that has the needed info to fix up the branches). 
- return " -march=mips64r6 -Wa,--no-warn -Wl,-Ttext=0 -Wl,-e0 -nostdlib"; + return " -march=mips64r6 -mmsa -Wa,--no-warn -Wl,-Ttext=0 -Wl,-e0 -nostdlib"; } void Pad(std::vector<uint8_t>& data) OVERRIDE { @@ -176,6 +178,39 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler, fp_registers_.push_back(new mips64::FpuRegister(mips64::F29)); fp_registers_.push_back(new mips64::FpuRegister(mips64::F30)); fp_registers_.push_back(new mips64::FpuRegister(mips64::F31)); + + vec_registers_.push_back(new mips64::VectorRegister(mips64::W0)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W1)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W2)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W3)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W4)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W5)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W6)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W7)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W8)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W9)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W10)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W11)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W12)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W13)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W14)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W15)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W16)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W17)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W18)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W19)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W20)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W21)); + 
vec_registers_.push_back(new mips64::VectorRegister(mips64::W22)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W23)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W24)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W25)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W26)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W27)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W28)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W29)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W30)); + vec_registers_.push_back(new mips64::VectorRegister(mips64::W31)); } } @@ -183,6 +218,7 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler, AssemblerTest::TearDown(); STLDeleteElements(®isters_); STLDeleteElements(&fp_registers_); + STLDeleteElements(&vec_registers_); } std::vector<mips64::GpuRegister*> GetRegisters() OVERRIDE { @@ -193,6 +229,10 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler, return fp_registers_; } + std::vector<mips64::VectorRegister*> GetVectorRegisters() OVERRIDE { + return vec_registers_; + } + uint32_t CreateImmediate(int64_t imm_value) OVERRIDE { return imm_value; } @@ -272,6 +312,7 @@ class AssemblerMIPS64Test : public AssemblerTest<mips64::Mips64Assembler, std::map<mips64::GpuRegister, std::string, MIPS64CpuRegisterCompare> secondary_register_names_; std::vector<mips64::FpuRegister*> fp_registers_; + std::vector<mips64::VectorRegister*> vec_registers_; }; @@ -1560,6 +1601,10 @@ TEST_F(AssemblerMIPS64Test, LoadFromOffset) { __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, -256); __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, -32768); __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 0xABCDEF00); + __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 0x7FFFFFFE); + __ LoadFromOffset(mips64::kLoadSignedByte, 
mips64::A0, mips64::A1, 0x7FFFFFFF); + __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 0x80000000); + __ LoadFromOffset(mips64::kLoadSignedByte, mips64::A0, mips64::A1, 0x80000001); __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A0, 0); __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0); @@ -1574,6 +1619,10 @@ TEST_F(AssemblerMIPS64Test, LoadFromOffset) { __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, -256); __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, -32768); __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0xABCDEF00); + __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0x7FFFFFFE); + __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0x7FFFFFFF); + __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0x80000000); + __ LoadFromOffset(mips64::kLoadUnsignedByte, mips64::A0, mips64::A1, 0x80000001); __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A0, 0); __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0); @@ -1588,6 +1637,10 @@ TEST_F(AssemblerMIPS64Test, LoadFromOffset) { __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, -256); __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, -32768); __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0xABCDEF00); + __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0x7FFFFFFC); + __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0x7FFFFFFE); + __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0x80000000); + __ LoadFromOffset(mips64::kLoadSignedHalfword, mips64::A0, mips64::A1, 0x80000002); __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A0, 0); __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0); @@ -1602,6 +1655,10 @@ TEST_F(AssemblerMIPS64Test, 
LoadFromOffset) { __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, -256); __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, -32768); __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0xABCDEF00); + __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0x7FFFFFFC); + __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0x7FFFFFFE); + __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0x80000000); + __ LoadFromOffset(mips64::kLoadUnsignedHalfword, mips64::A0, mips64::A1, 0x80000002); __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A0, 0); __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0); @@ -1616,6 +1673,10 @@ TEST_F(AssemblerMIPS64Test, LoadFromOffset) { __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, -256); __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, -32768); __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0xABCDEF00); + __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0x7FFFFFF8); + __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0x7FFFFFFC); + __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0x80000000); + __ LoadFromOffset(mips64::kLoadWord, mips64::A0, mips64::A1, 0x80000004); __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A0, 0); __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0); @@ -1630,6 +1691,10 @@ TEST_F(AssemblerMIPS64Test, LoadFromOffset) { __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, -256); __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, -32768); __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0xABCDEF00); + __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0x7FFFFFF8); + __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0x7FFFFFFC); + __ LoadFromOffset(mips64::kLoadUnsignedWord, 
mips64::A0, mips64::A1, 0x80000000); + __ LoadFromOffset(mips64::kLoadUnsignedWord, mips64::A0, mips64::A1, 0x80000004); __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A0, 0); __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0); @@ -1640,10 +1705,15 @@ TEST_F(AssemblerMIPS64Test, LoadFromOffset) { __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x8000); __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x8004); __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x10000); + __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x27FFC); __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x12345678); __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, -256); __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, -32768); __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0xABCDEF00); + __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x7FFFFFF8); + __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x7FFFFFFC); + __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x80000000); + __ LoadFromOffset(mips64::kLoadDoubleword, mips64::A0, mips64::A1, 0x80000004); const char* expected = "lb $a0, 0($a0)\n" @@ -1652,25 +1722,28 @@ TEST_F(AssemblerMIPS64Test, LoadFromOffset) { "lb $a0, 256($a1)\n" "lb $a0, 1000($a1)\n" "lb $a0, 0x7FFF($a1)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "lb $a0, 0($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "lb $a0, 1($at)\n" - "lui $at, 1\n" - "daddu $at, $at, $a1\n" - "lb $a0, 0($at)\n" - "lui $at, 0x1234\n" - "ori $at, 0x5678\n" - "daddu $at, $at, $a1\n" + "daddiu $at, $a1, 0x7FF8\n" + "lb $a0, 8($at)\n" + "daddiu $at, $a1, 32760\n" + "lb $a0, 9($at)\n" + "daui $at, $a1, 1\n" "lb $a0, 0($at)\n" + "daui $at, $a1, 0x1234\n" + "lb $a0, 0x5678($at)\n" "lb $a0, -256($a1)\n" "lb $a0, -32768($a1)\n" - "lui $at, 
0xABCD\n" - "ori $at, 0xEF00\n" - "daddu $at, $at, $a1\n" + "daui $at, $a1, 0xABCE\n" + "lb $a0, -4352($at)\n" + "daui $at, $a1, 32768\n" + "dahi $at, $at, 1\n" + "lb $a0, -2($at)\n" + "daui $at, $a1, 32768\n" + "dahi $at, $at, 1\n" + "lb $a0, -1($at)\n" + "daui $at, $a1, 32768\n" "lb $a0, 0($at)\n" + "daui $at, $a1, 32768\n" + "lb $a0, 1($at)\n" "lbu $a0, 0($a0)\n" "lbu $a0, 0($a1)\n" @@ -1678,25 +1751,28 @@ TEST_F(AssemblerMIPS64Test, LoadFromOffset) { "lbu $a0, 256($a1)\n" "lbu $a0, 1000($a1)\n" "lbu $a0, 0x7FFF($a1)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "lbu $a0, 0($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "lbu $a0, 1($at)\n" - "lui $at, 1\n" - "daddu $at, $at, $a1\n" - "lbu $a0, 0($at)\n" - "lui $at, 0x1234\n" - "ori $at, 0x5678\n" - "daddu $at, $at, $a1\n" + "daddiu $at, $a1, 0x7FF8\n" + "lbu $a0, 8($at)\n" + "daddiu $at, $a1, 32760\n" + "lbu $a0, 9($at)\n" + "daui $at, $a1, 1\n" "lbu $a0, 0($at)\n" + "daui $at, $a1, 0x1234\n" + "lbu $a0, 0x5678($at)\n" "lbu $a0, -256($a1)\n" "lbu $a0, -32768($a1)\n" - "lui $at, 0xABCD\n" - "ori $at, 0xEF00\n" - "daddu $at, $at, $a1\n" + "daui $at, $a1, 0xABCE\n" + "lbu $a0, -4352($at)\n" + "daui $at, $a1, 32768\n" + "dahi $at, $at, 1\n" + "lbu $a0, -2($at)\n" + "daui $at, $a1, 32768\n" + "dahi $at, $at, 1\n" + "lbu $a0, -1($at)\n" + "daui $at, $a1, 32768\n" "lbu $a0, 0($at)\n" + "daui $at, $a1, 32768\n" + "lbu $a0, 1($at)\n" "lh $a0, 0($a0)\n" "lh $a0, 0($a1)\n" @@ -1704,25 +1780,28 @@ TEST_F(AssemblerMIPS64Test, LoadFromOffset) { "lh $a0, 256($a1)\n" "lh $a0, 1000($a1)\n" "lh $a0, 0x7FFE($a1)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "lh $a0, 0($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "lh $a0, 2($at)\n" - "lui $at, 1\n" - "daddu $at, $at, $a1\n" - "lh $a0, 0($at)\n" - "lui $at, 0x1234\n" - "ori $at, 0x5678\n" - "daddu $at, $at, $a1\n" + "daddiu $at, $a1, 0x7FF8\n" + "lh $a0, 8($at)\n" + "daddiu $at, $a1, 32760\n" + "lh $a0, 10($at)\n" + 
"daui $at, $a1, 1\n" "lh $a0, 0($at)\n" + "daui $at, $a1, 0x1234\n" + "lh $a0, 0x5678($at)\n" "lh $a0, -256($a1)\n" "lh $a0, -32768($a1)\n" - "lui $at, 0xABCD\n" - "ori $at, 0xEF00\n" - "daddu $at, $at, $a1\n" + "daui $at, $a1, 0xABCE\n" + "lh $a0, -4352($at)\n" + "daui $at, $a1, 32768\n" + "dahi $at, $at, 1\n" + "lh $a0, -4($at)\n" + "daui $at, $a1, 32768\n" + "dahi $at, $at, 1\n" + "lh $a0, -2($at)\n" + "daui $at, $a1, 32768\n" "lh $a0, 0($at)\n" + "daui $at, $a1, 32768\n" + "lh $a0, 2($at)\n" "lhu $a0, 0($a0)\n" "lhu $a0, 0($a1)\n" @@ -1730,25 +1809,28 @@ TEST_F(AssemblerMIPS64Test, LoadFromOffset) { "lhu $a0, 256($a1)\n" "lhu $a0, 1000($a1)\n" "lhu $a0, 0x7FFE($a1)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "lhu $a0, 0($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "lhu $a0, 2($at)\n" - "lui $at, 1\n" - "daddu $at, $at, $a1\n" - "lhu $a0, 0($at)\n" - "lui $at, 0x1234\n" - "ori $at, 0x5678\n" - "daddu $at, $at, $a1\n" + "daddiu $at, $a1, 0x7FF8\n" + "lhu $a0, 8($at)\n" + "daddiu $at, $a1, 32760\n" + "lhu $a0, 10($at)\n" + "daui $at, $a1, 1\n" "lhu $a0, 0($at)\n" + "daui $at, $a1, 0x1234\n" + "lhu $a0, 0x5678($at)\n" "lhu $a0, -256($a1)\n" "lhu $a0, -32768($a1)\n" - "lui $at, 0xABCD\n" - "ori $at, 0xEF00\n" - "daddu $at, $at, $a1\n" + "daui $at, $a1, 0xABCE\n" + "lhu $a0, -4352($at)\n" + "daui $at, $a1, 32768\n" + "dahi $at, $at, 1\n" + "lhu $a0, -4($at)\n" + "daui $at, $a1, 32768\n" + "dahi $at, $at, 1\n" + "lhu $a0, -2($at)\n" + "daui $at, $a1, 32768\n" "lhu $a0, 0($at)\n" + "daui $at, $a1, 32768\n" + "lhu $a0, 2($at)\n" "lw $a0, 0($a0)\n" "lw $a0, 0($a1)\n" @@ -1756,25 +1838,28 @@ TEST_F(AssemblerMIPS64Test, LoadFromOffset) { "lw $a0, 256($a1)\n" "lw $a0, 1000($a1)\n" "lw $a0, 0x7FFC($a1)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "lw $a0, 0($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "lw $a0, 4($at)\n" - "lui $at, 1\n" - "daddu $at, $at, $a1\n" - "lw $a0, 0($at)\n" - "lui $at, 0x1234\n" 
- "ori $at, 0x5678\n" - "daddu $at, $at, $a1\n" + "daddiu $at, $a1, 0x7FF8\n" + "lw $a0, 8($at)\n" + "daddiu $at, $a1, 32760\n" + "lw $a0, 12($at)\n" + "daui $at, $a1, 1\n" "lw $a0, 0($at)\n" + "daui $at, $a1, 0x1234\n" + "lw $a0, 0x5678($at)\n" "lw $a0, -256($a1)\n" "lw $a0, -32768($a1)\n" - "lui $at, 0xABCD\n" - "ori $at, 0xEF00\n" - "daddu $at, $at, $a1\n" + "daui $at, $a1, 0xABCE\n" + "lw $a0, -4352($at)\n" + "daui $at, $a1, 32768\n" + "dahi $at, $at, 1\n" + "lw $a0, -8($at)\n" + "daui $at, $a1, 32768\n" + "dahi $at, $at, 1\n" + "lw $a0, -4($at)\n" + "daui $at, $a1, 32768\n" "lw $a0, 0($at)\n" + "daui $at, $a1, 32768\n" + "lw $a0, 4($at)\n" "lwu $a0, 0($a0)\n" "lwu $a0, 0($a1)\n" @@ -1782,59 +1867,73 @@ TEST_F(AssemblerMIPS64Test, LoadFromOffset) { "lwu $a0, 256($a1)\n" "lwu $a0, 1000($a1)\n" "lwu $a0, 0x7FFC($a1)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "lwu $a0, 0($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "lwu $a0, 4($at)\n" - "lui $at, 1\n" - "daddu $at, $at, $a1\n" - "lwu $a0, 0($at)\n" - "lui $at, 0x1234\n" - "ori $at, 0x5678\n" - "daddu $at, $at, $a1\n" + "daddiu $at, $a1, 0x7FF8\n" + "lwu $a0, 8($at)\n" + "daddiu $at, $a1, 32760\n" + "lwu $a0, 12($at)\n" + "daui $at, $a1, 1\n" "lwu $a0, 0($at)\n" + "daui $at, $a1, 0x1234\n" + "lwu $a0, 0x5678($at)\n" "lwu $a0, -256($a1)\n" "lwu $a0, -32768($a1)\n" - "lui $at, 0xABCD\n" - "ori $at, 0xEF00\n" - "daddu $at, $at, $a1\n" + "daui $at, $a1, 0xABCE\n" + "lwu $a0, -4352($at)\n" + "daui $at, $a1, 32768\n" + "dahi $at, $at, 1\n" + "lwu $a0, -8($at)\n" + "daui $at, $a1, 32768\n" + "dahi $at, $at, 1\n" + "lwu $a0, -4($at)\n" + "daui $at, $a1, 32768\n" "lwu $a0, 0($at)\n" + "daui $at, $a1, 32768\n" + "lwu $a0, 4($at)\n" "ld $a0, 0($a0)\n" "ld $a0, 0($a1)\n" "lwu $a0, 4($a1)\n" "lwu $t3, 8($a1)\n" - "dins $a0, $t3, 32, 32\n" + "dinsu $a0, $t3, 32, 32\n" "ld $a0, 256($a1)\n" "ld $a0, 1000($a1)\n" - "ori $at, $zero, 0x7FF8\n" - "daddu $at, $at, $a1\n" - "lwu $a0, 4($at)\n" - "lwu 
$t3, 8($at)\n" - "dins $a0, $t3, 32, 32\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "ld $a0, 0($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" + "daddiu $at, $a1, 32760\n" "lwu $a0, 4($at)\n" "lwu $t3, 8($at)\n" - "dins $a0, $t3, 32, 32\n" - "lui $at, 1\n" - "daddu $at, $at, $a1\n" - "ld $a0, 0($at)\n" - "lui $at, 0x1234\n" - "ori $at, 0x5678\n" - "daddu $at, $at, $a1\n" + "dinsu $a0, $t3, 32, 32\n" + "daddiu $at, $a1, 32760\n" + "ld $a0, 8($at)\n" + "daddiu $at, $a1, 32760\n" + "lwu $a0, 12($at)\n" + "lwu $t3, 16($at)\n" + "dinsu $a0, $t3, 32, 32\n" + "daui $at, $a1, 1\n" "ld $a0, 0($at)\n" + "daui $at, $a1, 2\n" + "daddiu $at, $at, 8\n" + "lwu $a0, 0x7ff4($at)\n" + "lwu $t3, 0x7ff8($at)\n" + "dinsu $a0, $t3, 32, 32\n" + "daui $at, $a1, 0x1234\n" + "ld $a0, 0x5678($at)\n" "ld $a0, -256($a1)\n" "ld $a0, -32768($a1)\n" - "lui $at, 0xABCD\n" - "ori $at, 0xEF00\n" - "daddu $at, $at, $a1\n" - "ld $a0, 0($at)\n"; + "daui $at, $a1, 0xABCE\n" + "ld $a0, -4352($at)\n" + "daui $at, $a1, 32768\n" + "dahi $at, $at, 1\n" + "ld $a0, -8($at)\n" + "daui $at, $a1, 32768\n" + "dahi $at, $at, 1\n" + "lwu $a0, -4($at)\n" + "lwu $t3, 0($at)\n" + "dinsu $a0, $t3, 32, 32\n" + "daui $at, $a1, 32768\n" + "ld $a0, 0($at)\n" + "daui $at, $a1, 32768\n" + "lwu $a0, 4($at)\n" + "lwu $t3, 8($at)\n" + "dinsu $a0, $t3, 32, 32\n"; DriverStr(expected, "LoadFromOffset"); } @@ -1868,57 +1967,42 @@ TEST_F(AssemblerMIPS64Test, LoadFpuFromOffset) { "lwc1 $f0, 4($a0)\n" "lwc1 $f0, 256($a0)\n" "lwc1 $f0, 0x7FFC($a0)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a0\n" - "lwc1 $f0, 0($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a0\n" - "lwc1 $f0, 4($at)\n" - "lui $at, 1\n" - "daddu $at, $at, $a0\n" - "lwc1 $f0, 0($at)\n" - "lui $at, 0x1234\n" - "ori $at, 0x5678\n" - "daddu $at, $at, $a0\n" + "daddiu $at, $a0, 32760 # 0x7FF8\n" + "lwc1 $f0, 8($at)\n" + "daddiu $at, $a0, 32760 # 0x7FF8\n" + "lwc1 $f0, 12($at)\n" + "daui $at, $a0, 1\n" "lwc1 $f0, 0($at)\n" + 
"daui $at, $a0, 4660 # 0x1234\n" + "lwc1 $f0, 22136($at) # 0x5678\n" "lwc1 $f0, -256($a0)\n" "lwc1 $f0, -32768($a0)\n" - "lui $at, 0xABCD\n" - "ori $at, 0xEF00\n" - "daddu $at, $at, $a0\n" - "lwc1 $f0, 0($at)\n" + "daui $at, $a0, 0xABCE\n" + "lwc1 $f0, -0x1100($at) # 0xEF00\n" "ldc1 $f0, 0($a0)\n" "lwc1 $f0, 4($a0)\n" "lw $t3, 8($a0)\n" "mthc1 $t3, $f0\n" "ldc1 $f0, 256($a0)\n" - "ori $at, $zero, 0x7FF8\n" - "daddu $at, $at, $a0\n" + "daddiu $at, $a0, 32760 # 0x7FF8\n" "lwc1 $f0, 4($at)\n" "lw $t3, 8($at)\n" "mthc1 $t3, $f0\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a0\n" - "ldc1 $f0, 0($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a0\n" - "lwc1 $f0, 4($at)\n" - "lw $t3, 8($at)\n" + "daddiu $at, $a0, 32760 # 0x7FF8\n" + "ldc1 $f0, 8($at)\n" + "daddiu $at, $a0, 32760 # 0x7FF8\n" + "lwc1 $f0, 12($at)\n" + "lw $t3, 16($at)\n" "mthc1 $t3, $f0\n" - "lui $at, 1\n" - "daddu $at, $at, $a0\n" - "ldc1 $f0, 0($at)\n" - "lui $at, 0x1234\n" - "ori $at, 0x5678\n" - "daddu $at, $at, $a0\n" + "daui $at, $a0, 1\n" "ldc1 $f0, 0($at)\n" + "daui $at, $a0, 4660 # 0x1234\n" + "ldc1 $f0, 22136($at) # 0x5678\n" "ldc1 $f0, -256($a0)\n" "ldc1 $f0, -32768($a0)\n" - "lui $at, 0xABCD\n" - "ori $at, 0xEF00\n" - "daddu $at, $at, $a0\n" - "ldc1 $f0, 0($at)\n"; + "daui $at, $a0, 0xABCE\n" + "ldc1 $f0, -0x1100($at) # 0xEF00\n"; DriverStr(expected, "LoadFpuFromOffset"); } @@ -1978,6 +2062,10 @@ TEST_F(AssemblerMIPS64Test, StoreToOffset) { __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, -256); __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, -32768); __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 0xABCDEF00); + __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 0x7FFFFFF8); + __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 0x7FFFFFFC); + __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 0x80000000); + __ StoreToOffset(mips64::kStoreDoubleword, mips64::A0, mips64::A1, 
0x80000004); const char* expected = "sb $a0, 0($a0)\n" @@ -1986,25 +2074,18 @@ TEST_F(AssemblerMIPS64Test, StoreToOffset) { "sb $a0, 256($a1)\n" "sb $a0, 1000($a1)\n" "sb $a0, 0x7FFF($a1)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "sb $a0, 0($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "sb $a0, 1($at)\n" - "lui $at, 1\n" - "daddu $at, $at, $a1\n" - "sb $a0, 0($at)\n" - "lui $at, 0x1234\n" - "ori $at, 0x5678\n" - "daddu $at, $at, $a1\n" + "daddiu $at, $a1, 0x7FF8\n" + "sb $a0, 8($at)\n" + "daddiu $at, $a1, 0x7FF8\n" + "sb $a0, 9($at)\n" + "daui $at, $a1, 1\n" "sb $a0, 0($at)\n" + "daui $at, $a1, 4660 # 0x1234\n" + "sb $a0, 22136($at) # 0x5678\n" "sb $a0, -256($a1)\n" "sb $a0, -32768($a1)\n" - "lui $at, 0xABCD\n" - "ori $at, 0xEF00\n" - "daddu $at, $at, $a1\n" - "sb $a0, 0($at)\n" + "daui $at, $a1, 43982 # 0xABCE\n" + "sb $a0, -4352($at) # 0xEF00\n" "sh $a0, 0($a0)\n" "sh $a0, 0($a1)\n" @@ -2012,25 +2093,18 @@ TEST_F(AssemblerMIPS64Test, StoreToOffset) { "sh $a0, 256($a1)\n" "sh $a0, 1000($a1)\n" "sh $a0, 0x7FFE($a1)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "sh $a0, 0($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "sh $a0, 2($at)\n" - "lui $at, 1\n" - "daddu $at, $at, $a1\n" - "sh $a0, 0($at)\n" - "lui $at, 0x1234\n" - "ori $at, 0x5678\n" - "daddu $at, $at, $a1\n" + "daddiu $at, $a1, 0x7FF8\n" + "sh $a0, 8($at)\n" + "daddiu $at, $a1, 0x7FF8\n" + "sh $a0, 10($at)\n" + "daui $at, $a1, 1\n" "sh $a0, 0($at)\n" + "daui $at, $a1, 4660 # 0x1234\n" + "sh $a0, 22136($at) # 0x5678\n" "sh $a0, -256($a1)\n" "sh $a0, -32768($a1)\n" - "lui $at, 0xABCD\n" - "ori $at, 0xEF00\n" - "daddu $at, $at, $a1\n" - "sh $a0, 0($at)\n" + "daui $at, $a1, 43982 # 0xABCE\n" + "sh $a0, -4352($at) # 0xEF00\n" "sw $a0, 0($a0)\n" "sw $a0, 0($a1)\n" @@ -2038,25 +2112,18 @@ TEST_F(AssemblerMIPS64Test, StoreToOffset) { "sw $a0, 256($a1)\n" "sw $a0, 1000($a1)\n" "sw $a0, 0x7FFC($a1)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, 
$a1\n" - "sw $a0, 0($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "sw $a0, 4($at)\n" - "lui $at, 1\n" - "daddu $at, $at, $a1\n" - "sw $a0, 0($at)\n" - "lui $at, 0x1234\n" - "ori $at, 0x5678\n" - "daddu $at, $at, $a1\n" + "daddiu $at, $a1, 0x7FF8\n" + "sw $a0, 8($at)\n" + "daddiu $at, $a1, 0x7FF8\n" + "sw $a0, 12($at)\n" + "daui $at, $a1, 1\n" "sw $a0, 0($at)\n" + "daui $at, $a1, 4660 # 0x1234\n" + "sw $a0, 22136($at) # 0x5678\n" "sw $a0, -256($a1)\n" "sw $a0, -32768($a1)\n" - "lui $at, 0xABCD\n" - "ori $at, 0xEF00\n" - "daddu $at, $at, $a1\n" - "sw $a0, 0($at)\n" + "daui $at, $a1, 43982 # 0xABCE\n" + "sw $a0, -4352($at) # 0xEF00\n" "sd $a0, 0($a0)\n" "sd $a0, 0($a1)\n" @@ -2065,32 +2132,38 @@ TEST_F(AssemblerMIPS64Test, StoreToOffset) { "sw $t3, 8($a1)\n" "sd $a0, 256($a1)\n" "sd $a0, 1000($a1)\n" - "ori $at, $zero, 0x7FF8\n" - "daddu $at, $at, $a1\n" + "daddiu $at, $a1, 0x7FF8\n" "sw $a0, 4($at)\n" "dsrl32 $t3, $a0, 0\n" "sw $t3, 8($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "sd $a0, 0($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a1\n" - "sw $a0, 4($at)\n" + "daddiu $at, $a1, 32760 # 0x7FF8\n" + "sd $a0, 8($at)\n" + "daddiu $at, $a1, 32760 # 0x7FF8\n" + "sw $a0, 12($at)\n" "dsrl32 $t3, $a0, 0\n" - "sw $t3, 8($at)\n" - "lui $at, 1\n" - "daddu $at, $at, $a1\n" - "sd $a0, 0($at)\n" - "lui $at, 0x1234\n" - "ori $at, 0x5678\n" - "daddu $at, $at, $a1\n" + "sw $t3, 16($at)\n" + "daui $at, $a1, 1\n" "sd $a0, 0($at)\n" + "daui $at, $a1, 4660 # 0x1234\n" + "sd $a0, 22136($at) # 0x5678\n" "sd $a0, -256($a1)\n" "sd $a0, -32768($a1)\n" - "lui $at, 0xABCD\n" - "ori $at, 0xEF00\n" - "daddu $at, $at, $a1\n" - "sd $a0, 0($at)\n"; + "daui $at, $a1, 0xABCE\n" + "sd $a0, -0x1100($at)\n" + "daui $at, $a1, 0x8000\n" + "dahi $at, $at, 1\n" + "sd $a0, -8($at)\n" + "daui $at, $a1, 0x8000\n" + "dahi $at, $at, 1\n" + "sw $a0, -4($at) # 0xFFFC\n" + "dsrl32 $t3, $a0, 0\n" + "sw $t3, 0($at) # 0x0\n" + "daui $at, $a1, 0x8000\n" + "sd $a0, 
0($at) # 0x0\n" + "daui $at, $a1, 0x8000\n" + "sw $a0, 4($at) # 0x4\n" + "dsrl32 $t3, $a0, 0\n" + "sw $t3, 8($at) # 0x8\n"; DriverStr(expected, "StoreToOffset"); } @@ -2124,60 +2197,115 @@ TEST_F(AssemblerMIPS64Test, StoreFpuToOffset) { "swc1 $f0, 4($a0)\n" "swc1 $f0, 256($a0)\n" "swc1 $f0, 0x7FFC($a0)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a0\n" - "swc1 $f0, 0($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a0\n" - "swc1 $f0, 4($at)\n" - "lui $at, 1\n" - "daddu $at, $at, $a0\n" - "swc1 $f0, 0($at)\n" - "lui $at, 0x1234\n" - "ori $at, 0x5678\n" - "daddu $at, $at, $a0\n" + "daddiu $at, $a0, 32760 # 0x7FF8\n" + "swc1 $f0, 8($at)\n" + "daddiu $at, $a0, 32760 # 0x7FF8\n" + "swc1 $f0, 12($at)\n" + "daui $at, $a0, 1\n" "swc1 $f0, 0($at)\n" + "daui $at, $a0, 4660 # 0x1234\n" + "swc1 $f0, 22136($at) # 0x5678\n" "swc1 $f0, -256($a0)\n" "swc1 $f0, -32768($a0)\n" - "lui $at, 0xABCD\n" - "ori $at, 0xEF00\n" - "daddu $at, $at, $a0\n" - "swc1 $f0, 0($at)\n" + "daui $at, $a0, 0xABCE\n" + "swc1 $f0, -0x1100($at)\n" "sdc1 $f0, 0($a0)\n" "mfhc1 $t3, $f0\n" "swc1 $f0, 4($a0)\n" "sw $t3, 8($a0)\n" "sdc1 $f0, 256($a0)\n" - "ori $at, $zero, 0x7FF8\n" - "daddu $at, $at, $a0\n" + "daddiu $at, $a0, 32760 # 0x7FF8\n" "mfhc1 $t3, $f0\n" "swc1 $f0, 4($at)\n" "sw $t3, 8($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a0\n" - "sdc1 $f0, 0($at)\n" - "ori $at, $zero, 0x8000\n" - "daddu $at, $at, $a0\n" + "daddiu $at, $a0, 32760 # 0x7FF8\n" + "sdc1 $f0, 8($at)\n" + "daddiu $at, $a0, 32760 # 0x7FF8\n" "mfhc1 $t3, $f0\n" - "swc1 $f0, 4($at)\n" - "sw $t3, 8($at)\n" - "lui $at, 1\n" - "daddu $at, $at, $a0\n" - "sdc1 $f0, 0($at)\n" - "lui $at, 0x1234\n" - "ori $at, 0x5678\n" - "daddu $at, $at, $a0\n" + "swc1 $f0, 12($at)\n" + "sw $t3, 16($at)\n" + "daui $at, $a0, 1\n" "sdc1 $f0, 0($at)\n" + "daui $at, $a0, 4660 # 0x1234\n" + "sdc1 $f0, 22136($at) # 0x5678\n" "sdc1 $f0, -256($a0)\n" "sdc1 $f0, -32768($a0)\n" - "lui $at, 0xABCD\n" - "ori $at, 0xEF00\n" - "daddu $at, $at, 
$a0\n" - "sdc1 $f0, 0($at)\n"; + "daui $at, $a0, 0xABCE\n" + "sdc1 $f0, -0x1100($at)\n"; DriverStr(expected, "StoreFpuToOffset"); } +TEST_F(AssemblerMIPS64Test, StoreConstToOffset) { + __ StoreConstToOffset(mips64::kStoreByte, 0xFF, mips64::A1, +0, mips64::T8); + __ StoreConstToOffset(mips64::kStoreHalfword, 0xFFFF, mips64::A1, +0, mips64::T8); + __ StoreConstToOffset(mips64::kStoreWord, 0x12345678, mips64::A1, +0, mips64::T8); + __ StoreConstToOffset(mips64::kStoreDoubleword, 0x123456789ABCDEF0, mips64::A1, +0, mips64::T8); + + __ StoreConstToOffset(mips64::kStoreByte, 0, mips64::A1, +0, mips64::T8); + __ StoreConstToOffset(mips64::kStoreHalfword, 0, mips64::A1, +0, mips64::T8); + __ StoreConstToOffset(mips64::kStoreWord, 0, mips64::A1, +0, mips64::T8); + __ StoreConstToOffset(mips64::kStoreDoubleword, 0, mips64::A1, +0, mips64::T8); + + __ StoreConstToOffset(mips64::kStoreDoubleword, 0x1234567812345678, mips64::A1, +0, mips64::T8); + __ StoreConstToOffset(mips64::kStoreDoubleword, 0x1234567800000000, mips64::A1, +0, mips64::T8); + __ StoreConstToOffset(mips64::kStoreDoubleword, 0x0000000012345678, mips64::A1, +0, mips64::T8); + + __ StoreConstToOffset(mips64::kStoreWord, 0, mips64::T8, +0, mips64::T8); + __ StoreConstToOffset(mips64::kStoreWord, 0x12345678, mips64::T8, +0, mips64::T8); + + __ StoreConstToOffset(mips64::kStoreWord, 0, mips64::A1, -0xFFF0, mips64::T8); + __ StoreConstToOffset(mips64::kStoreWord, 0x12345678, mips64::A1, +0xFFF0, mips64::T8); + + __ StoreConstToOffset(mips64::kStoreWord, 0, mips64::T8, -0xFFF0, mips64::T8); + __ StoreConstToOffset(mips64::kStoreWord, 0x12345678, mips64::T8, +0xFFF0, mips64::T8); + + const char* expected = + "ori $t8, $zero, 0xFF\n" + "sb $t8, 0($a1)\n" + "ori $t8, $zero, 0xFFFF\n" + "sh $t8, 0($a1)\n" + "lui $t8, 0x1234\n" + "ori $t8, $t8,0x5678\n" + "sw $t8, 0($a1)\n" + "lui $t8, 0x9abc\n" + "ori $t8, $t8,0xdef0\n" + "dahi $t8, $t8, 0x5679\n" + "dati $t8, $t8, 0x1234\n" + "sd $t8, 0($a1)\n" + "sb $zero, 0($a1)\n" + 
"sh $zero, 0($a1)\n" + "sw $zero, 0($a1)\n" + "sd $zero, 0($a1)\n" + "lui $t8, 0x1234\n" + "ori $t8, $t8,0x5678\n" + "dins $t8, $t8, 0x20, 0x20\n" + "sd $t8, 0($a1)\n" + "lui $t8, 0x246\n" + "ori $t8, $t8, 0x8acf\n" + "dsll32 $t8, $t8, 0x3\n" + "sd $t8, 0($a1)\n" + "lui $t8, 0x1234\n" + "ori $t8, $t8, 0x5678\n" + "sd $t8, 0($a1)\n" + "sw $zero, 0($t8)\n" + "lui $at,0x1234\n" + "ori $at, $at, 0x5678\n" + "sw $at, 0($t8)\n" + "daddiu $at, $a1, -32760 # 0x8008\n" + "sw $zero, -32760($at) # 0x8008\n" + "daddiu $at, $a1, 32760 # 0x7FF8\n" + "lui $t8, 4660 # 0x1234\n" + "ori $t8, $t8, 22136 # 0x5678\n" + "sw $t8, 32760($at) # 0x7FF8\n" + "daddiu $at, $t8, -32760 # 0x8008\n" + "sw $zero, -32760($at) # 0x8008\n" + "daddiu $at, $t8, 32760 # 0x7FF8\n" + "lui $t8, 4660 # 0x1234\n" + "ori $t8, $t8, 22136 # 0x5678\n" + "sw $t8, 32760($at) # 0x7FF8\n"; + DriverStr(expected, "StoreConstToOffset"); +} ////////////////////////////// // Loading/adding Constants // ////////////////////////////// @@ -2374,6 +2502,370 @@ TEST_F(AssemblerMIPS64Test, LoadConst64) { EXPECT_EQ(tester.GetPathsCovered(), art::mips64::kLoadConst64PathAllPaths); } +// MSA instructions. 
+ +TEST_F(AssemblerMIPS64Test, AndV) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::AndV, "and.v ${reg1}, ${reg2}, ${reg3}"), "and.v"); +} + +TEST_F(AssemblerMIPS64Test, OrV) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::OrV, "or.v ${reg1}, ${reg2}, ${reg3}"), "or.v"); +} + +TEST_F(AssemblerMIPS64Test, NorV) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::NorV, "nor.v ${reg1}, ${reg2}, ${reg3}"), "nor.v"); +} + +TEST_F(AssemblerMIPS64Test, XorV) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::XorV, "xor.v ${reg1}, ${reg2}, ${reg3}"), "xor.v"); +} + +TEST_F(AssemblerMIPS64Test, AddvB) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::AddvB, "addv.b ${reg1}, ${reg2}, ${reg3}"), + "addv.b"); +} + +TEST_F(AssemblerMIPS64Test, AddvH) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::AddvH, "addv.h ${reg1}, ${reg2}, ${reg3}"), + "addv.h"); +} + +TEST_F(AssemblerMIPS64Test, AddvW) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::AddvW, "addv.w ${reg1}, ${reg2}, ${reg3}"), + "addv.w"); +} + +TEST_F(AssemblerMIPS64Test, AddvD) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::AddvD, "addv.d ${reg1}, ${reg2}, ${reg3}"), + "addv.d"); +} + +TEST_F(AssemblerMIPS64Test, SubvB) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SubvB, "subv.b ${reg1}, ${reg2}, ${reg3}"), + "subv.b"); +} + +TEST_F(AssemblerMIPS64Test, SubvH) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SubvH, "subv.h ${reg1}, ${reg2}, ${reg3}"), + "subv.h"); +} + +TEST_F(AssemblerMIPS64Test, SubvW) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SubvW, "subv.w ${reg1}, ${reg2}, ${reg3}"), + "subv.w"); +} + +TEST_F(AssemblerMIPS64Test, SubvD) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SubvD, "subv.d ${reg1}, ${reg2}, ${reg3}"), + "subv.d"); +} + +TEST_F(AssemblerMIPS64Test, MulvB) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::MulvB, "mulv.b ${reg1}, ${reg2}, ${reg3}"), + "mulv.b"); +} + +TEST_F(AssemblerMIPS64Test, MulvH) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::MulvH, "mulv.h 
${reg1}, ${reg2}, ${reg3}"), + "mulv.h"); +} + +TEST_F(AssemblerMIPS64Test, MulvW) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::MulvW, "mulv.w ${reg1}, ${reg2}, ${reg3}"), + "mulv.w"); +} + +TEST_F(AssemblerMIPS64Test, MulvD) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::MulvD, "mulv.d ${reg1}, ${reg2}, ${reg3}"), + "mulv.d"); +} + +TEST_F(AssemblerMIPS64Test, Div_sB) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_sB, "div_s.b ${reg1}, ${reg2}, ${reg3}"), + "div_s.b"); +} + +TEST_F(AssemblerMIPS64Test, Div_sH) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_sH, "div_s.h ${reg1}, ${reg2}, ${reg3}"), + "div_s.h"); +} + +TEST_F(AssemblerMIPS64Test, Div_sW) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_sW, "div_s.w ${reg1}, ${reg2}, ${reg3}"), + "div_s.w"); +} + +TEST_F(AssemblerMIPS64Test, Div_sD) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_sD, "div_s.d ${reg1}, ${reg2}, ${reg3}"), + "div_s.d"); +} + +TEST_F(AssemblerMIPS64Test, Div_uB) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_uB, "div_u.b ${reg1}, ${reg2}, ${reg3}"), + "div_u.b"); +} + +TEST_F(AssemblerMIPS64Test, Div_uH) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_uH, "div_u.h ${reg1}, ${reg2}, ${reg3}"), + "div_u.h"); +} + +TEST_F(AssemblerMIPS64Test, Div_uW) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_uW, "div_u.w ${reg1}, ${reg2}, ${reg3}"), + "div_u.w"); +} + +TEST_F(AssemblerMIPS64Test, Div_uD) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Div_uD, "div_u.d ${reg1}, ${reg2}, ${reg3}"), + "div_u.d"); +} + +TEST_F(AssemblerMIPS64Test, Mod_sB) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_sB, "mod_s.b ${reg1}, ${reg2}, ${reg3}"), + "mod_s.b"); +} + +TEST_F(AssemblerMIPS64Test, Mod_sH) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_sH, "mod_s.h ${reg1}, ${reg2}, ${reg3}"), + "mod_s.h"); +} + +TEST_F(AssemblerMIPS64Test, Mod_sW) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_sW, "mod_s.w ${reg1}, ${reg2}, ${reg3}"), + 
"mod_s.w"); +} + +TEST_F(AssemblerMIPS64Test, Mod_sD) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_sD, "mod_s.d ${reg1}, ${reg2}, ${reg3}"), + "mod_s.d"); +} + +TEST_F(AssemblerMIPS64Test, Mod_uB) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_uB, "mod_u.b ${reg1}, ${reg2}, ${reg3}"), + "mod_u.b"); +} + +TEST_F(AssemblerMIPS64Test, Mod_uH) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_uH, "mod_u.h ${reg1}, ${reg2}, ${reg3}"), + "mod_u.h"); +} + +TEST_F(AssemblerMIPS64Test, Mod_uW) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_uW, "mod_u.w ${reg1}, ${reg2}, ${reg3}"), + "mod_u.w"); +} + +TEST_F(AssemblerMIPS64Test, Mod_uD) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::Mod_uD, "mod_u.d ${reg1}, ${reg2}, ${reg3}"), + "mod_u.d"); +} + +TEST_F(AssemblerMIPS64Test, FaddW) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::FaddW, "fadd.w ${reg1}, ${reg2}, ${reg3}"), + "fadd.w"); +} + +TEST_F(AssemblerMIPS64Test, FaddD) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::FaddD, "fadd.d ${reg1}, ${reg2}, ${reg3}"), + "fadd.d"); +} + +TEST_F(AssemblerMIPS64Test, FsubW) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::FsubW, "fsub.w ${reg1}, ${reg2}, ${reg3}"), + "fsub.w"); +} + +TEST_F(AssemblerMIPS64Test, FsubD) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::FsubD, "fsub.d ${reg1}, ${reg2}, ${reg3}"), + "fsub.d"); +} + +TEST_F(AssemblerMIPS64Test, FmulW) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmulW, "fmul.w ${reg1}, ${reg2}, ${reg3}"), + "fmul.w"); +} + +TEST_F(AssemblerMIPS64Test, FmulD) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::FmulD, "fmul.d ${reg1}, ${reg2}, ${reg3}"), + "fmul.d"); +} + +TEST_F(AssemblerMIPS64Test, FdivW) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::FdivW, "fdiv.w ${reg1}, ${reg2}, ${reg3}"), + "fdiv.w"); +} + +TEST_F(AssemblerMIPS64Test, FdivD) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::FdivD, "fdiv.d ${reg1}, ${reg2}, ${reg3}"), + "fdiv.d"); +} + +TEST_F(AssemblerMIPS64Test, Ffint_sW) 
{ + DriverStr(RepeatVV(&mips64::Mips64Assembler::Ffint_sW, "ffint_s.w ${reg1}, ${reg2}"), + "ffint_s.w"); +} + +TEST_F(AssemblerMIPS64Test, Ffint_sD) { + DriverStr(RepeatVV(&mips64::Mips64Assembler::Ffint_sD, "ffint_s.d ${reg1}, ${reg2}"), + "ffint_s.d"); +} + +TEST_F(AssemblerMIPS64Test, Ftint_sW) { + DriverStr(RepeatVV(&mips64::Mips64Assembler::Ftint_sW, "ftint_s.w ${reg1}, ${reg2}"), + "ftint_s.w"); +} + +TEST_F(AssemblerMIPS64Test, Ftint_sD) { + DriverStr(RepeatVV(&mips64::Mips64Assembler::Ftint_sD, "ftint_s.d ${reg1}, ${reg2}"), + "ftint_s.d"); +} + +TEST_F(AssemblerMIPS64Test, SllB) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SllB, "sll.b ${reg1}, ${reg2}, ${reg3}"), "sll.b"); +} + +TEST_F(AssemblerMIPS64Test, SllH) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SllH, "sll.h ${reg1}, ${reg2}, ${reg3}"), "sll.h"); +} + +TEST_F(AssemblerMIPS64Test, SllW) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SllW, "sll.w ${reg1}, ${reg2}, ${reg3}"), "sll.w"); +} + +TEST_F(AssemblerMIPS64Test, SllD) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SllD, "sll.d ${reg1}, ${reg2}, ${reg3}"), "sll.d"); +} + +TEST_F(AssemblerMIPS64Test, SraB) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SraB, "sra.b ${reg1}, ${reg2}, ${reg3}"), "sra.b"); +} + +TEST_F(AssemblerMIPS64Test, SraH) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SraH, "sra.h ${reg1}, ${reg2}, ${reg3}"), "sra.h"); +} + +TEST_F(AssemblerMIPS64Test, SraW) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SraW, "sra.w ${reg1}, ${reg2}, ${reg3}"), "sra.w"); +} + +TEST_F(AssemblerMIPS64Test, SraD) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SraD, "sra.d ${reg1}, ${reg2}, ${reg3}"), "sra.d"); +} + +TEST_F(AssemblerMIPS64Test, SrlB) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SrlB, "srl.b ${reg1}, ${reg2}, ${reg3}"), "srl.b"); +} + +TEST_F(AssemblerMIPS64Test, SrlH) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SrlH, "srl.h ${reg1}, ${reg2}, ${reg3}"), "srl.h"); +} + 
+TEST_F(AssemblerMIPS64Test, SrlW) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SrlW, "srl.w ${reg1}, ${reg2}, ${reg3}"), "srl.w"); +} + +TEST_F(AssemblerMIPS64Test, SrlD) { + DriverStr(RepeatVVV(&mips64::Mips64Assembler::SrlD, "srl.d ${reg1}, ${reg2}, ${reg3}"), "srl.d"); +} + +TEST_F(AssemblerMIPS64Test, SlliB) { + DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SlliB, 3, "slli.b ${reg1}, ${reg2}, {imm}"), + "slli.b"); +} + +TEST_F(AssemblerMIPS64Test, SlliH) { + DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SlliH, 4, "slli.h ${reg1}, ${reg2}, {imm}"), + "slli.h"); +} + +TEST_F(AssemblerMIPS64Test, SlliW) { + DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SlliW, 5, "slli.w ${reg1}, ${reg2}, {imm}"), + "slli.w"); +} + +TEST_F(AssemblerMIPS64Test, SlliD) { + DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SlliD, 6, "slli.d ${reg1}, ${reg2}, {imm}"), + "slli.d"); +} + +TEST_F(AssemblerMIPS64Test, MoveV) { + DriverStr(RepeatVV(&mips64::Mips64Assembler::MoveV, "move.v ${reg1}, ${reg2}"), "move.v"); +} + +TEST_F(AssemblerMIPS64Test, SplatiB) { + DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SplatiB, 4, "splati.b ${reg1}, ${reg2}[{imm}]"), + "splati.b"); +} + +TEST_F(AssemblerMIPS64Test, SplatiH) { + DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SplatiH, 3, "splati.h ${reg1}, ${reg2}[{imm}]"), + "splati.h"); +} + +TEST_F(AssemblerMIPS64Test, SplatiW) { + DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SplatiW, 2, "splati.w ${reg1}, ${reg2}[{imm}]"), + "splati.w"); +} + +TEST_F(AssemblerMIPS64Test, SplatiD) { + DriverStr(RepeatVVIb(&mips64::Mips64Assembler::SplatiD, 1, "splati.d ${reg1}, ${reg2}[{imm}]"), + "splati.d"); +} + +TEST_F(AssemblerMIPS64Test, FillB) { + DriverStr(RepeatVR(&mips64::Mips64Assembler::FillB, "fill.b ${reg1}, ${reg2}"), "fill.b"); +} + +TEST_F(AssemblerMIPS64Test, FillH) { + DriverStr(RepeatVR(&mips64::Mips64Assembler::FillH, "fill.h ${reg1}, ${reg2}"), "fill.h"); +} + +TEST_F(AssemblerMIPS64Test, FillW) { + 
DriverStr(RepeatVR(&mips64::Mips64Assembler::FillW, "fill.w ${reg1}, ${reg2}"), "fill.w"); +} + +TEST_F(AssemblerMIPS64Test, FillD) { + DriverStr(RepeatVR(&mips64::Mips64Assembler::FillD, "fill.d ${reg1}, ${reg2}"), "fill.d"); +} + +TEST_F(AssemblerMIPS64Test, LdB) { + DriverStr(RepeatVRIb(&mips64::Mips64Assembler::LdB, -10, "ld.b ${reg1}, {imm}(${reg2})"), "ld.b"); +} + +TEST_F(AssemblerMIPS64Test, LdH) { + DriverStr(RepeatVRIb(&mips64::Mips64Assembler::LdH, -10, "ld.h ${reg1}, {imm}(${reg2})", 0, 2), + "ld.h"); +} + +TEST_F(AssemblerMIPS64Test, LdW) { + DriverStr(RepeatVRIb(&mips64::Mips64Assembler::LdW, -10, "ld.w ${reg1}, {imm}(${reg2})", 0, 4), + "ld.w"); +} + +TEST_F(AssemblerMIPS64Test, LdD) { + DriverStr(RepeatVRIb(&mips64::Mips64Assembler::LdD, -10, "ld.d ${reg1}, {imm}(${reg2})", 0, 8), + "ld.d"); +} + +TEST_F(AssemblerMIPS64Test, StB) { + DriverStr(RepeatVRIb(&mips64::Mips64Assembler::StB, -10, "st.b ${reg1}, {imm}(${reg2})"), "st.b"); +} + +TEST_F(AssemblerMIPS64Test, StH) { + DriverStr(RepeatVRIb(&mips64::Mips64Assembler::StH, -10, "st.h ${reg1}, {imm}(${reg2})", 0, 2), + "st.h"); +} + +TEST_F(AssemblerMIPS64Test, StW) { + DriverStr(RepeatVRIb(&mips64::Mips64Assembler::StW, -10, "st.w ${reg1}, {imm}(${reg2})", 0, 4), + "st.w"); +} + +TEST_F(AssemblerMIPS64Test, StD) { + DriverStr(RepeatVRIb(&mips64::Mips64Assembler::StD, -10, "st.d ${reg1}, {imm}(${reg2})", 0, 8), + "st.d"); +} + #undef __ } // namespace art diff --git a/compiler/utils/mips64/constants_mips64.h b/compiler/utils/mips64/constants_mips64.h index f57498d34f..5ae9c73589 100644 --- a/compiler/utils/mips64/constants_mips64.h +++ b/compiler/utils/mips64/constants_mips64.h @@ -51,8 +51,35 @@ enum InstructionFields { kFdShift = 6, kFdBits = 5, + kMsaOperationShift = 23, + kMsaELMOperationShift = 22, + kMsa2ROperationShift = 18, + kMsa2RFOperationShift = 17, + kDfShift = 21, + kDfMShift = 16, + kDf2RShift = 16, + kDfNShift = 16, + kWtShift = 16, + kWtBits = 5, + kWsShift = 11, + kWsBits = 5, + 
kWdShift = 6, + kWdBits = 5, + kS10Shift = 16, + kS10MinorShift = 2, + kBranchOffsetMask = 0x0000ffff, kJumpOffsetMask = 0x03ffffff, + kMsaMajorOpcode = 0x1e, + kMsaDfMByteMask = 0x70, + kMsaDfMHalfwordMask = 0x60, + kMsaDfMWordMask = 0x40, + kMsaDfMDoublewordMask = 0x00, + kMsaDfNByteMask = 0x00, + kMsaDfNHalfwordMask = 0x20, + kMsaDfNWordMask = 0x30, + kMsaDfNDoublewordMask = 0x38, + kMsaS10Mask = 0x3ff, }; enum ScaleFactor { diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc index 6a57f45e42..5307dc09d9 100644 --- a/compiler/utils/x86/assembler_x86.cc +++ b/compiler/utils/x86/assembler_x86.cc @@ -1169,6 +1169,32 @@ void X86Assembler::pand(XmmRegister dst, XmmRegister src) { } +void X86Assembler::andnpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x55); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::andnps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x0F); + EmitUint8(0x55); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pandn(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0xDF); + EmitXmmRegisterOperand(dst, src); +} + + void X86Assembler::orpd(XmmRegister dst, XmmRegister src) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); EmitUint8(0x66); @@ -1195,6 +1221,43 @@ void X86Assembler::por(XmmRegister dst, XmmRegister src) { } +void X86Assembler::pcmpeqb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x74); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pcmpeqw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x75); + EmitXmmRegisterOperand(dst, src); +} + + 
+void X86Assembler::pcmpeqd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x76); + EmitXmmRegisterOperand(dst, src); +} + + +void X86Assembler::pcmpeqq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x29); + EmitXmmRegisterOperand(dst, src); +} + + void X86Assembler::shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); EmitUint8(0x66); diff --git a/compiler/utils/x86/assembler_x86.h b/compiler/utils/x86/assembler_x86.h index e3c123ccaf..f52cf16c8b 100644 --- a/compiler/utils/x86/assembler_x86.h +++ b/compiler/utils/x86/assembler_x86.h @@ -487,10 +487,19 @@ class X86Assembler FINAL : public Assembler { void andps(XmmRegister dst, const Address& src); void pand(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void andnpd(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void andnps(XmmRegister dst, XmmRegister src); + void pandn(XmmRegister dst, XmmRegister src); + void orpd(XmmRegister dst, XmmRegister src); // no addr variant (for now) void orps(XmmRegister dst, XmmRegister src); void por(XmmRegister dst, XmmRegister src); + void pcmpeqb(XmmRegister dst, XmmRegister src); + void pcmpeqw(XmmRegister dst, XmmRegister src); + void pcmpeqd(XmmRegister dst, XmmRegister src); + void pcmpeqq(XmmRegister dst, XmmRegister src); + void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm); void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm); void pshufd(XmmRegister dst, XmmRegister src, const Immediate& imm); diff --git a/compiler/utils/x86/assembler_x86_test.cc b/compiler/utils/x86/assembler_x86_test.cc index 110d0dcd05..23049079e0 100644 --- a/compiler/utils/x86/assembler_x86_test.cc +++ b/compiler/utils/x86/assembler_x86_test.cc @@ -581,6 +581,18 @@ 
TEST_F(AssemblerX86Test, PAnd) { DriverStr(RepeatFF(&x86::X86Assembler::pand, "pand %{reg2}, %{reg1}"), "pand"); } +TEST_F(AssemblerX86Test, AndnPD) { + DriverStr(RepeatFF(&x86::X86Assembler::andnpd, "andnpd %{reg2}, %{reg1}"), "andnpd"); +} + +TEST_F(AssemblerX86Test, AndnPS) { + DriverStr(RepeatFF(&x86::X86Assembler::andnps, "andnps %{reg2}, %{reg1}"), "andnps"); +} + +TEST_F(AssemblerX86Test, PAndn) { + DriverStr(RepeatFF(&x86::X86Assembler::pandn, "pandn %{reg2}, %{reg1}"), "pandn"); +} + TEST_F(AssemblerX86Test, OrPD) { DriverStr(RepeatFF(&x86::X86Assembler::orpd, "orpd %{reg2}, %{reg1}"), "orpd"); } @@ -593,6 +605,22 @@ TEST_F(AssemblerX86Test, POr) { DriverStr(RepeatFF(&x86::X86Assembler::por, "por %{reg2}, %{reg1}"), "por"); } +TEST_F(AssemblerX86Test, PCmpeqB) { + DriverStr(RepeatFF(&x86::X86Assembler::pcmpeqb, "pcmpeqb %{reg2}, %{reg1}"), "cmpeqb"); +} + +TEST_F(AssemblerX86Test, PCmpeqW) { + DriverStr(RepeatFF(&x86::X86Assembler::pcmpeqw, "pcmpeqw %{reg2}, %{reg1}"), "cmpeqw"); +} + +TEST_F(AssemblerX86Test, PCmpeqD) { + DriverStr(RepeatFF(&x86::X86Assembler::pcmpeqd, "pcmpeqd %{reg2}, %{reg1}"), "cmpeqd"); +} + +TEST_F(AssemblerX86Test, PCmpeqQ) { + DriverStr(RepeatFF(&x86::X86Assembler::pcmpeqq, "pcmpeqq %{reg2}, %{reg1}"), "cmpeqq"); +} + TEST_F(AssemblerX86Test, ShufPS) { DriverStr(RepeatFFI(&x86::X86Assembler::shufps, 1, "shufps ${imm}, %{reg2}, %{reg1}"), "shufps"); } diff --git a/compiler/utils/x86_64/assembler_x86_64.cc b/compiler/utils/x86_64/assembler_x86_64.cc index 688fdcc37d..d20a6965c3 100644 --- a/compiler/utils/x86_64/assembler_x86_64.cc +++ b/compiler/utils/x86_64/assembler_x86_64.cc @@ -1375,6 +1375,32 @@ void X86_64Assembler::pand(XmmRegister dst, XmmRegister src) { EmitXmmRegisterOperand(dst.LowBits(), src); } +void X86_64Assembler::andnpd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x55); + 
EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::andnps(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x55); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pandn(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0xDF); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + void X86_64Assembler::orpd(XmmRegister dst, XmmRegister src) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); EmitUint8(0x66); @@ -1401,6 +1427,43 @@ void X86_64Assembler::por(XmmRegister dst, XmmRegister src) { EmitXmmRegisterOperand(dst.LowBits(), src); } +void X86_64Assembler::pcmpeqb(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x74); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pcmpeqw(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x75); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pcmpeqd(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x76); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + +void X86_64Assembler::pcmpeqq(XmmRegister dst, XmmRegister src) { + AssemblerBuffer::EnsureCapacity ensured(&buffer_); + EmitUint8(0x66); + EmitOptionalRex32(dst, src); + EmitUint8(0x0F); + EmitUint8(0x38); + EmitUint8(0x29); + EmitXmmRegisterOperand(dst.LowBits(), src); +} + void X86_64Assembler::shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm) { AssemblerBuffer::EnsureCapacity ensured(&buffer_); 
EmitUint8(0x66); diff --git a/compiler/utils/x86_64/assembler_x86_64.h b/compiler/utils/x86_64/assembler_x86_64.h index 480e7116eb..08e17e81e5 100644 --- a/compiler/utils/x86_64/assembler_x86_64.h +++ b/compiler/utils/x86_64/assembler_x86_64.h @@ -515,10 +515,19 @@ class X86_64Assembler FINAL : public Assembler { void andps(XmmRegister dst, XmmRegister src); // no addr variant (for now) void pand(XmmRegister dst, XmmRegister src); + void andnpd(XmmRegister dst, XmmRegister src); // no addr variant (for now) + void andnps(XmmRegister dst, XmmRegister src); + void pandn(XmmRegister dst, XmmRegister src); + void orpd(XmmRegister dst, XmmRegister src); // no addr variant (for now) void orps(XmmRegister dst, XmmRegister src); void por(XmmRegister dst, XmmRegister src); + void pcmpeqb(XmmRegister dst, XmmRegister src); + void pcmpeqw(XmmRegister dst, XmmRegister src); + void pcmpeqd(XmmRegister dst, XmmRegister src); + void pcmpeqq(XmmRegister dst, XmmRegister src); + void shufpd(XmmRegister dst, XmmRegister src, const Immediate& imm); void shufps(XmmRegister dst, XmmRegister src, const Immediate& imm); void pshufd(XmmRegister dst, XmmRegister src, const Immediate& imm); diff --git a/compiler/utils/x86_64/assembler_x86_64_test.cc b/compiler/utils/x86_64/assembler_x86_64_test.cc index ba011c968e..20062fdb07 100644 --- a/compiler/utils/x86_64/assembler_x86_64_test.cc +++ b/compiler/utils/x86_64/assembler_x86_64_test.cc @@ -1269,6 +1269,18 @@ TEST_F(AssemblerX86_64Test, Pand) { DriverStr(RepeatFF(&x86_64::X86_64Assembler::pand, "pand %{reg2}, %{reg1}"), "pand"); } +TEST_F(AssemblerX86_64Test, andnpd) { + DriverStr(RepeatFF(&x86_64::X86_64Assembler::andnpd, "andnpd %{reg2}, %{reg1}"), "andnpd"); +} + +TEST_F(AssemblerX86_64Test, andnps) { + DriverStr(RepeatFF(&x86_64::X86_64Assembler::andnps, "andnps %{reg2}, %{reg1}"), "andnps"); +} + +TEST_F(AssemblerX86_64Test, Pandn) { + DriverStr(RepeatFF(&x86_64::X86_64Assembler::pandn, "pandn %{reg2}, %{reg1}"), "pandn"); +} + 
TEST_F(AssemblerX86_64Test, Orps) { DriverStr(RepeatFF(&x86_64::X86_64Assembler::orps, "orps %{reg2}, %{reg1}"), "orps"); } @@ -1281,6 +1293,22 @@ TEST_F(AssemblerX86_64Test, Por) { DriverStr(RepeatFF(&x86_64::X86_64Assembler::por, "por %{reg2}, %{reg1}"), "por"); } +TEST_F(AssemblerX86_64Test, PCmpeqb) { + DriverStr(RepeatFF(&x86_64::X86_64Assembler::pcmpeqb, "pcmpeqb %{reg2}, %{reg1}"), "pcmpeqb"); +} + +TEST_F(AssemblerX86_64Test, PCmpeqw) { + DriverStr(RepeatFF(&x86_64::X86_64Assembler::pcmpeqw, "pcmpeqw %{reg2}, %{reg1}"), "pcmpeqw"); +} + +TEST_F(AssemblerX86_64Test, PCmpeqd) { + DriverStr(RepeatFF(&x86_64::X86_64Assembler::pcmpeqd, "pcmpeqd %{reg2}, %{reg1}"), "pcmpeqd"); +} + +TEST_F(AssemblerX86_64Test, PCmpeqq) { + DriverStr(RepeatFF(&x86_64::X86_64Assembler::pcmpeqq, "pcmpeqq %{reg2}, %{reg1}"), "pcmpeqq"); +} + TEST_F(AssemblerX86_64Test, Shufps) { DriverStr(RepeatFFI(&x86_64::X86_64Assembler::shufps, 1, "shufps ${imm}, %{reg2}, %{reg1}"), "shufps"); } diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc index 01c33591e5..4bfc84990d 100644 --- a/compiler/verifier_deps_test.cc +++ b/compiler/verifier_deps_test.cc @@ -714,12 +714,12 @@ TEST_F(VerifierDepsTest, MoveException_Unresolved) { TEST_F(VerifierDepsTest, StaticField_Resolved_DeclaredInReferenced) { ASSERT_TRUE(VerifyMethod("StaticField_Resolved_DeclaredInReferenced")); - ASSERT_TRUE(HasClass("Ljava/lang/System;", true, "public final")); + ASSERT_TRUE(HasClass("Ljava/lang/System;", true, "public")); ASSERT_TRUE(HasField("Ljava/lang/System;", "out", "Ljava/io/PrintStream;", true, - "public final static", + "public static", "Ljava/lang/System;")); } @@ -727,13 +727,13 @@ TEST_F(VerifierDepsTest, StaticField_Resolved_DeclaredInSuperclass1) { ASSERT_TRUE(VerifyMethod("StaticField_Resolved_DeclaredInSuperclass1")); ASSERT_TRUE(HasClass("Ljava/util/SimpleTimeZone;", true, "public")); ASSERT_TRUE(HasField( - "Ljava/util/SimpleTimeZone;", "LONG", "I", true, "public final static", 
"Ljava/util/TimeZone;")); + "Ljava/util/SimpleTimeZone;", "LONG", "I", true, "public static", "Ljava/util/TimeZone;")); } TEST_F(VerifierDepsTest, StaticField_Resolved_DeclaredInSuperclass2) { ASSERT_TRUE(VerifyMethod("StaticField_Resolved_DeclaredInSuperclass2")); ASSERT_TRUE(HasField( - "LMySimpleTimeZone;", "SHORT", "I", true, "public final static", "Ljava/util/TimeZone;")); + "LMySimpleTimeZone;", "SHORT", "I", true, "public static", "Ljava/util/TimeZone;")); } TEST_F(VerifierDepsTest, StaticField_Resolved_DeclaredInInterface1) { @@ -743,7 +743,7 @@ TEST_F(VerifierDepsTest, StaticField_Resolved_DeclaredInInterface1) { "PI_ENABLE_OUTPUT_ESCAPING", "Ljava/lang/String;", true, - "public final static", + "public static", "Ljavax/xml/transform/Result;")); } @@ -753,7 +753,7 @@ TEST_F(VerifierDepsTest, StaticField_Resolved_DeclaredInInterface2) { "PI_ENABLE_OUTPUT_ESCAPING", "Ljava/lang/String;", true, - "public final static", + "public static", "Ljavax/xml/transform/Result;")); } @@ -763,7 +763,7 @@ TEST_F(VerifierDepsTest, StaticField_Resolved_DeclaredInInterface3) { "PI_ENABLE_OUTPUT_ESCAPING", "Ljava/lang/String;", true, - "public final static", + "public static", "Ljavax/xml/transform/Result;")); } @@ -773,13 +773,13 @@ TEST_F(VerifierDepsTest, StaticField_Resolved_DeclaredInInterface4) { "ELEMENT_NODE", "S", true, - "public final static", + "public static", "Lorg/w3c/dom/Node;")); } TEST_F(VerifierDepsTest, StaticField_Unresolved_ReferrerInBoot) { ASSERT_TRUE(VerifyMethod("StaticField_Unresolved_ReferrerInBoot")); - ASSERT_TRUE(HasClass("Ljava/util/TimeZone;", true, "public abstract")); + ASSERT_TRUE(HasClass("Ljava/util/TimeZone;", true, "public")); ASSERT_TRUE(HasField("Ljava/util/TimeZone;", "x", "I", false)); } @@ -851,7 +851,7 @@ TEST_F(VerifierDepsTest, InvokeStatic_Resolved_DeclaredInReferenced) { TEST_F(VerifierDepsTest, InvokeStatic_Resolved_DeclaredInSuperclass1) { ASSERT_TRUE(VerifyMethod("InvokeStatic_Resolved_DeclaredInSuperclass1")); - 
ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public abstract")); + ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public")); ASSERT_TRUE(HasMethod("direct", "Ljavax/net/ssl/SSLSocket;", "setSocketImplFactory", @@ -874,7 +874,7 @@ TEST_F(VerifierDepsTest, InvokeStatic_Resolved_DeclaredInSuperclass2) { TEST_F(VerifierDepsTest, InvokeStatic_DeclaredInInterface1) { ASSERT_TRUE(VerifyMethod("InvokeStatic_DeclaredInInterface1")); - ASSERT_TRUE(HasClass("Ljava/util/Map$Entry;", true, "public abstract interface")); + ASSERT_TRUE(HasClass("Ljava/util/Map$Entry;", true, "public interface")); ASSERT_TRUE(HasMethod("direct", "Ljava/util/Map$Entry;", "comparingByKey", @@ -896,7 +896,7 @@ TEST_F(VerifierDepsTest, InvokeStatic_DeclaredInInterface2) { TEST_F(VerifierDepsTest, InvokeStatic_Unresolved1) { ASSERT_FALSE(VerifyMethod("InvokeStatic_Unresolved1")); - ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public abstract")); + ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public")); ASSERT_TRUE(HasMethod("direct", "Ljavax/net/ssl/SSLSocket;", "x", "()V", false)); } @@ -914,7 +914,7 @@ TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInReferenced) { TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInSuperclass1) { ASSERT_FALSE(VerifyMethod("InvokeDirect_Resolved_DeclaredInSuperclass1")); - ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public abstract")); + ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public")); ASSERT_TRUE(HasMethod("direct", "Ljavax/net/ssl/SSLSocket;", "checkOldImpl", @@ -932,7 +932,7 @@ TEST_F(VerifierDepsTest, InvokeDirect_Resolved_DeclaredInSuperclass2) { TEST_F(VerifierDepsTest, InvokeDirect_Unresolved1) { ASSERT_FALSE(VerifyMethod("InvokeDirect_Unresolved1")); - ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public abstract")); + ASSERT_TRUE(HasClass("Ljavax/net/ssl/SSLSocket;", true, "public")); ASSERT_TRUE(HasMethod("direct", "Ljavax/net/ssl/SSLSocket;", "x", "()V", 
false)); } @@ -987,7 +987,7 @@ TEST_F(VerifierDepsTest, InvokeVirtual_Resolved_DeclaredInSuperinterface) { "size", "()I", true, - "public abstract", + "public", "Ljava/util/Set;")); } @@ -1016,13 +1016,13 @@ TEST_F(VerifierDepsTest, InvokeVirtual_ActuallyDirect) { TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInReferenced) { ASSERT_TRUE(VerifyMethod("InvokeInterface_Resolved_DeclaredInReferenced")); - ASSERT_TRUE(HasClass("Ljava/lang/Runnable;", true, "public abstract interface")); + ASSERT_TRUE(HasClass("Ljava/lang/Runnable;", true, "public interface")); ASSERT_TRUE(HasMethod("interface", "Ljava/lang/Runnable;", "run", "()V", true, - "public abstract", + "public", "Ljava/lang/Runnable;")); } @@ -1038,7 +1038,7 @@ TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInSuperinterface1) { "run", "()V", true, - "public abstract", + "public", "Ljava/lang/Runnable;")); } @@ -1049,13 +1049,13 @@ TEST_F(VerifierDepsTest, InvokeInterface_Resolved_DeclaredInSuperinterface2) { "isEmpty", "()Z", true, - "public abstract", + "public", "Ljava/util/Set;")); } TEST_F(VerifierDepsTest, InvokeInterface_Unresolved1) { ASSERT_FALSE(VerifyMethod("InvokeInterface_Unresolved1")); - ASSERT_TRUE(HasClass("Ljava/lang/Runnable;", true, "public abstract interface")); + ASSERT_TRUE(HasClass("Ljava/lang/Runnable;", true, "public interface")); ASSERT_TRUE(HasMethod("interface", "Ljava/lang/Runnable;", "x", "()V", false)); } @@ -1066,20 +1066,20 @@ TEST_F(VerifierDepsTest, InvokeInterface_Unresolved2) { TEST_F(VerifierDepsTest, InvokeSuper_ThisAssignable) { ASSERT_TRUE(VerifyMethod("InvokeSuper_ThisAssignable")); - ASSERT_TRUE(HasClass("Ljava/lang/Runnable;", true, "public abstract interface")); + ASSERT_TRUE(HasClass("Ljava/lang/Runnable;", true, "public interface")); ASSERT_TRUE(HasAssignable("Ljava/lang/Runnable;", "Ljava/lang/Thread;", true)); ASSERT_TRUE(HasMethod("interface", "Ljava/lang/Runnable;", "run", "()V", true, - "public abstract", + "public", 
"Ljava/lang/Runnable;")); } TEST_F(VerifierDepsTest, InvokeSuper_ThisNotAssignable) { ASSERT_FALSE(VerifyMethod("InvokeSuper_ThisNotAssignable")); - ASSERT_TRUE(HasClass("Ljava/lang/Integer;", true, "public final")); + ASSERT_TRUE(HasClass("Ljava/lang/Integer;", true, "public")); ASSERT_TRUE(HasAssignable("Ljava/lang/Integer;", "Ljava/lang/Thread;", false)); ASSERT_TRUE(HasMethod( "virtual", "Ljava/lang/Integer;", "intValue", "()I", true, "public", "Ljava/lang/Integer;")); @@ -1087,12 +1087,12 @@ TEST_F(VerifierDepsTest, InvokeSuper_ThisNotAssignable) { TEST_F(VerifierDepsTest, ArgumentType_ResolvedReferenceArray) { ASSERT_TRUE(VerifyMethod("ArgumentType_ResolvedReferenceArray")); - ASSERT_TRUE(HasClass("[Ljava/lang/Thread;", true, "public final abstract")); + ASSERT_TRUE(HasClass("[Ljava/lang/Thread;", true, "public")); } TEST_F(VerifierDepsTest, NewArray_Resolved) { ASSERT_TRUE(VerifyMethod("NewArray_Resolved")); - ASSERT_TRUE(HasClass("[Ljava/lang/IllegalStateException;", true, "public final abstract")); + ASSERT_TRUE(HasClass("[Ljava/lang/IllegalStateException;", true, "public")); } TEST_F(VerifierDepsTest, EncodeDecode) { @@ -1528,5 +1528,13 @@ TEST_F(VerifierDepsTest, MultiDexVerification) { ASSERT_FALSE(buffer.empty()); } +TEST_F(VerifierDepsTest, NotAssignable_InterfaceWithClassInBoot) { + ASSERT_TRUE(TestAssignabilityRecording(/* dst */ "Ljava/lang/Exception;", + /* src */ "LIface;", + /* is_strict */ true, + /* is_assignable */ false)); + ASSERT_TRUE(HasAssignable("Ljava/lang/Exception;", "LIface;", false)); +} + } // namespace verifier } // namespace art diff --git a/dex2oat/Android.bp b/dex2oat/Android.bp index 0924aec7f1..048f36d76c 100644 --- a/dex2oat/Android.bp +++ b/dex2oat/Android.bp @@ -14,6 +14,12 @@ // limitations under the License. 
// +cc_library_headers { + name: "dex2oat_headers", + host_supported: true, + export_include_dirs: ["include"], +} + cc_defaults { name: "dex2oat-defaults", host_supported: true, @@ -40,6 +46,7 @@ cc_defaults { include_dirs: [ "art/cmdline", ], + header_libs: ["dex2oat_headers"], } art_cc_binary { @@ -132,4 +139,5 @@ art_cc_test { "art_gtest_defaults", ], srcs: ["dex2oat_test.cc"], + header_libs: ["dex2oat_headers"], } diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 3fa30fafb1..e80be8172a 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -54,6 +54,7 @@ #include "debug/method_debug_info.h" #include "dex/quick_compiler_callbacks.h" #include "dex/verification_results.h" +#include "dex2oat_return_codes.h" #include "dex_file-inl.h" #include "driver/compiler_driver.h" #include "driver/compiler_options.h" @@ -1442,11 +1443,11 @@ class Dex2Oat FINAL { // Set up the environment for compilation. Includes starting the runtime and loading/opening the // boot class path. - bool Setup() { + dex2oat::ReturnCode Setup() { TimingLogger::ScopedTiming t("dex2oat Setup", timings_); if (!PrepareImageClasses() || !PrepareCompiledClasses() || !PrepareCompiledMethods()) { - return false; + return dex2oat::ReturnCode::kOther; } verification_results_.reset(new VerificationResults(compiler_options_.get())); @@ -1458,12 +1459,12 @@ class Dex2Oat FINAL { RuntimeArgumentMap runtime_options; if (!PrepareRuntimeOptions(&runtime_options)) { - return false; + return dex2oat::ReturnCode::kOther; } CreateOatWriters(); if (!AddDexFileSources()) { - return false; + return dex2oat::ReturnCode::kOther; } if (IsBootImage() && image_filenames_.size() > 1) { @@ -1479,14 +1480,14 @@ class Dex2Oat FINAL { // When compiling an app, create the runtime early to retrieve // the image location key needed for the oat header. 
if (!CreateRuntime(std::move(runtime_options))) { - return false; + return dex2oat::ReturnCode::kCreateRuntime; } if (CompilerFilter::DependsOnImageChecksum(compiler_options_->GetCompilerFilter())) { TimingLogger::ScopedTiming t3("Loading image checksum", timings_); std::vector<gc::space::ImageSpace*> image_spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces(); - image_file_location_oat_checksum_ = OatFileAssistant::CalculateCombinedImageChecksum(); + image_file_location_oat_checksum_ = image_spaces[0]->GetImageHeader().GetOatChecksum(); image_file_location_oat_data_begin_ = reinterpret_cast<uintptr_t>(image_spaces[0]->GetImageHeader().GetOatDataBegin()); image_patch_delta_ = image_spaces[0]->GetImageHeader().GetPatchDelta(); @@ -1550,7 +1551,7 @@ class Dex2Oat FINAL { update_input_vdex_, &opened_dex_files_map, &opened_dex_files)) { - return false; + return dex2oat::ReturnCode::kOther; } dex_files_per_oat_file_.push_back(MakeNonOwningPointerVector(opened_dex_files)); if (opened_dex_files_map != nullptr) { @@ -1602,7 +1603,7 @@ class Dex2Oat FINAL { // Note: Runtime acquires ownership of these dex files. runtime_options.Set(RuntimeArgumentMap::BootClassPathDexList, &opened_dex_files_); if (!CreateRuntime(std::move(runtime_options))) { - return false; + return dex2oat::ReturnCode::kOther; } } @@ -1636,7 +1637,7 @@ class Dex2Oat FINAL { for (const std::unique_ptr<MemMap>& map : opened_dex_files_maps_) { if (!map->Protect(PROT_READ | PROT_WRITE)) { PLOG(ERROR) << "Failed to make .dex files writeable."; - return false; + return dex2oat::ReturnCode::kOther; } } @@ -1651,14 +1652,14 @@ class Dex2Oat FINAL { soa.Self()->AssertPendingException(); soa.Self()->ClearException(); PLOG(ERROR) << "Failed to register dex file."; - return false; + return dex2oat::ReturnCode::kOther; } // Pre-register dex files so that we can access verification results without locks during // compilation and verification. 
verification_results_->AddDexFile(dex_file); } - return true; + return dex2oat::ReturnCode::kNoFailure; } // If we need to keep the oat file open for the image writer. @@ -1907,6 +1908,14 @@ class Dex2Oat FINAL { oat_writer->GetOatDataOffset(), oat_writer->GetOatSize()); } + + if (IsBootImage()) { + // Have the image_file_location_oat_checksum_ for boot oat files + // depend on the contents of all the boot oat files. This way only + // the primary image checksum needs to be checked to determine + // whether any of the images are out of date. + image_file_location_oat_checksum_ ^= oat_writer->GetOatHeader().GetChecksum(); + } } for (size_t i = 0, size = oat_files_.size(); i != size; ++i) { @@ -2781,13 +2790,13 @@ static void b13564922() { #endif } -static int CompileImage(Dex2Oat& dex2oat) { +static dex2oat::ReturnCode CompileImage(Dex2Oat& dex2oat) { dex2oat.LoadClassProfileDescriptors(); dex2oat.Compile(); if (!dex2oat.WriteOutputFiles()) { dex2oat.EraseOutputFiles(); - return EXIT_FAILURE; + return dex2oat::ReturnCode::kOther; } // Flush boot.oat. We always expect the output file by name, and it will be re-opened from the @@ -2796,46 +2805,46 @@ static int CompileImage(Dex2Oat& dex2oat) { if (dex2oat.ShouldKeepOatFileOpen()) { if (!dex2oat.FlushOutputFiles()) { dex2oat.EraseOutputFiles(); - return EXIT_FAILURE; + return dex2oat::ReturnCode::kOther; } } else if (!dex2oat.FlushCloseOutputFiles()) { - return EXIT_FAILURE; + return dex2oat::ReturnCode::kOther; } // Creates the boot.art and patches the oat files. if (!dex2oat.HandleImage()) { - return EXIT_FAILURE; + return dex2oat::ReturnCode::kOther; } // When given --host, finish early without stripping. if (dex2oat.IsHost()) { if (!dex2oat.FlushCloseOutputFiles()) { - return EXIT_FAILURE; + return dex2oat::ReturnCode::kOther; } dex2oat.DumpTiming(); - return EXIT_SUCCESS; + return dex2oat::ReturnCode::kNoFailure; } // Copy stripped to unstripped location, if necessary. 
if (!dex2oat.CopyStrippedToUnstripped()) { - return EXIT_FAILURE; + return dex2oat::ReturnCode::kOther; } // FlushClose again, as stripping might have re-opened the oat files. if (!dex2oat.FlushCloseOutputFiles()) { - return EXIT_FAILURE; + return dex2oat::ReturnCode::kOther; } dex2oat.DumpTiming(); - return EXIT_SUCCESS; + return dex2oat::ReturnCode::kNoFailure; } -static int CompileApp(Dex2Oat& dex2oat) { +static dex2oat::ReturnCode CompileApp(Dex2Oat& dex2oat) { dex2oat.Compile(); if (!dex2oat.WriteOutputFiles()) { dex2oat.EraseOutputFiles(); - return EXIT_FAILURE; + return dex2oat::ReturnCode::kOther; } // Do not close the oat files here. We might have gotten the output file by file descriptor, @@ -2844,29 +2853,29 @@ static int CompileApp(Dex2Oat& dex2oat) { // When given --host, finish early without stripping. if (dex2oat.IsHost()) { if (!dex2oat.FlushCloseOutputFiles()) { - return EXIT_FAILURE; + return dex2oat::ReturnCode::kOther; } dex2oat.DumpTiming(); - return EXIT_SUCCESS; + return dex2oat::ReturnCode::kNoFailure; } // Copy stripped to unstripped location, if necessary. This will implicitly flush & close the // stripped versions. If this is given, we expect to be able to open writable files by name. if (!dex2oat.CopyStrippedToUnstripped()) { - return EXIT_FAILURE; + return dex2oat::ReturnCode::kOther; } // Flush and close the files. 
if (!dex2oat.FlushCloseOutputFiles()) { - return EXIT_FAILURE; + return dex2oat::ReturnCode::kOther; } dex2oat.DumpTiming(); - return EXIT_SUCCESS; + return dex2oat::ReturnCode::kNoFailure; } -static int dex2oat(int argc, char** argv) { +static dex2oat::ReturnCode Dex2oat(int argc, char** argv) { b13564922(); TimingLogger timings("compiler", false, false); @@ -2885,14 +2894,14 @@ static int dex2oat(int argc, char** argv) { if (dex2oat->UseProfile()) { if (!dex2oat->LoadProfile()) { LOG(ERROR) << "Failed to process profile file"; - return EXIT_FAILURE; + return dex2oat::ReturnCode::kOther; } } if (dex2oat->DoDexLayoutOptimizations()) { if (dex2oat->HasInputVdexFile()) { LOG(ERROR) << "Dexlayout is incompatible with an input VDEX"; - return EXIT_FAILURE; + return dex2oat::ReturnCode::kOther; } } @@ -2900,7 +2909,7 @@ static int dex2oat(int argc, char** argv) { // Check early that the result of compilation can be written if (!dex2oat->OpenFile()) { - return EXIT_FAILURE; + return dex2oat::ReturnCode::kOther; } // Print the complete line when any of the following is true: @@ -2915,16 +2924,17 @@ static int dex2oat(int argc, char** argv) { LOG(INFO) << StrippedCommandLine(); } - if (!dex2oat->Setup()) { + dex2oat::ReturnCode setup_code = dex2oat->Setup(); + if (setup_code != dex2oat::ReturnCode::kNoFailure) { dex2oat->EraseOutputFiles(); - return EXIT_FAILURE; + return setup_code; } // Helps debugging on device. Can be used to determine which dalvikvm instance invoked a dex2oat // instance. Used by tools/bisection_search/bisection_search.py. 
VLOG(compiler) << "Running dex2oat (parent PID = " << getppid() << ")"; - bool result; + dex2oat::ReturnCode result; if (dex2oat->IsImage()) { result = CompileImage(*dex2oat); } else { @@ -2937,7 +2947,7 @@ static int dex2oat(int argc, char** argv) { } // namespace art int main(int argc, char** argv) { - int result = art::dex2oat(argc, argv); + int result = static_cast<int>(art::Dex2oat(argc, argv)); // Everything was done, do an explicit exit here to avoid running Runtime destructors that take // time (bug 10645725) unless we're a debug build or running on valgrind. Note: The Dex2Oat class // should not destruct the runtime in this case. diff --git a/dex2oat/dex2oat_test.cc b/dex2oat/dex2oat_test.cc index 289b8ab50a..8c14b50094 100644 --- a/dex2oat/dex2oat_test.cc +++ b/dex2oat/dex2oat_test.cc @@ -30,6 +30,7 @@ #include "base/macros.h" #include "dex_file-inl.h" #include "dex2oat_environment_test.h" +#include "dex2oat_return_codes.h" #include "jit/profile_compilation_info.h" #include "oat.h" #include "oat_file.h" @@ -50,12 +51,12 @@ class Dex2oatTest : public Dex2oatEnvironmentTest { } protected: - void GenerateOdexForTest(const std::string& dex_location, - const std::string& odex_location, - CompilerFilter::Filter filter, - const std::vector<std::string>& extra_args = {}, - bool expect_success = true, - bool use_fd = false) { + int GenerateOdexForTestWithStatus(const std::string& dex_location, + const std::string& odex_location, + CompilerFilter::Filter filter, + std::string* error_msg, + const std::vector<std::string>& extra_args = {}, + bool use_fd = false) { std::unique_ptr<File> oat_file; std::vector<std::string> args; args.push_back("--dex-file=" + dex_location); @@ -73,12 +74,27 @@ class Dex2oatTest : public Dex2oatEnvironmentTest { args.insert(args.end(), extra_args.begin(), extra_args.end()); - std::string error_msg; - bool success = Dex2Oat(args, &error_msg); + int status = Dex2Oat(args, error_msg); if (oat_file != nullptr) { - 
ASSERT_EQ(oat_file->FlushClose(), 0) << "Could not flush and close oat file"; + CHECK_EQ(oat_file->FlushClose(), 0) << "Could not flush and close oat file"; } + return status; + } + void GenerateOdexForTest(const std::string& dex_location, + const std::string& odex_location, + CompilerFilter::Filter filter, + const std::vector<std::string>& extra_args = {}, + bool expect_success = true, + bool use_fd = false) { + std::string error_msg; + int status = GenerateOdexForTestWithStatus(dex_location, + odex_location, + filter, + &error_msg, + extra_args, + use_fd); + bool success = (status == 0); if (expect_success) { ASSERT_TRUE(success) << error_msg << std::endl << output_; @@ -118,7 +134,7 @@ class Dex2oatTest : public Dex2oatEnvironmentTest { EXPECT_EQ(expected, actual); } - bool Dex2Oat(const std::vector<std::string>& dex2oat_args, std::string* error_msg) { + int Dex2Oat(const std::vector<std::string>& dex2oat_args, std::string* error_msg) { Runtime* runtime = Runtime::Current(); const std::vector<gc::space::ImageSpace*>& image_spaces = @@ -196,6 +212,7 @@ class Dex2oatTest : public Dex2oatEnvironmentTest { c_args.push_back(nullptr); execv(c_args[0], const_cast<char* const*>(c_args.data())); exit(1); + UNREACHABLE(); } else { close(link[1]); char buffer[128]; @@ -206,12 +223,12 @@ class Dex2oatTest : public Dex2oatEnvironmentTest { output_ += std::string(buffer, bytes_read); } close(link[0]); - int status = 0; + int status = -1; if (waitpid(pid, &status, 0) != -1) { success_ = (status == 0); } + return status; } - return success_; } std::string output_ = ""; @@ -845,4 +862,30 @@ TEST_F(Dex2oatWatchdogTest, TestWatchdogTrigger) { RunTest(false, { "--watchdog-timeout=10" }); } +class Dex2oatReturnCodeTest : public Dex2oatTest { + protected: + int RunTest(const std::vector<std::string>& extra_args = {}) { + std::string dex_location = GetScratchDir() + "/Dex2OatSwapTest.jar"; + std::string odex_location = GetOdexDir() + "/Dex2OatSwapTest.odex"; + + 
Copy(GetTestDexFileName(), dex_location); + + std::string error_msg; + return GenerateOdexForTestWithStatus(dex_location, + odex_location, + CompilerFilter::kSpeed, + &error_msg, + extra_args); + } + + std::string GetTestDexFileName() { + return GetDexSrc1(); + } +}; + +TEST_F(Dex2oatReturnCodeTest, TestCreateRuntime) { + int status = RunTest({ "--boot-image=/this/does/not/exist/yolo.oat" }); + EXPECT_EQ(static_cast<int>(dex2oat::ReturnCode::kCreateRuntime), WEXITSTATUS(status)) << output_; +} + } // namespace art diff --git a/dex2oat/include/dex2oat_return_codes.h b/dex2oat/include/dex2oat_return_codes.h new file mode 100644 index 0000000000..cc5400fc27 --- /dev/null +++ b/dex2oat/include/dex2oat_return_codes.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_DEX2OAT_INCLUDE_DEX2OAT_RETURN_CODES_H_ +#define ART_DEX2OAT_INCLUDE_DEX2OAT_RETURN_CODES_H_ + +namespace art { +namespace dex2oat { + +enum class ReturnCode : int { + kNoFailure = 0, + kOther = 1, + kCreateRuntime = 2, +}; + +} // namespace dex2oat +} // namespace art + +#endif // ART_DEX2OAT_INCLUDE_DEX2OAT_RETURN_CODES_H_ diff --git a/dexlayout/Android.bp b/dexlayout/Android.bp index 9ee9ebd3d4..cf523ece7a 100644 --- a/dexlayout/Android.bp +++ b/dexlayout/Android.bp @@ -19,6 +19,7 @@ art_cc_defaults { "dexlayout.cc", "dex_ir.cc", "dex_ir_builder.cc", + "dex_verify.cc", "dex_visualize.cc", "dex_writer.cc", ], diff --git a/dexlayout/dex_ir.cc b/dexlayout/dex_ir.cc index 34983cf5fb..4228503a8f 100644 --- a/dexlayout/dex_ir.cc +++ b/dexlayout/dex_ir.cc @@ -56,7 +56,7 @@ static void GetLocalsCb(void* context, const DexFile::LocalInfo& entry) { entry.end_address_, entry.reg_))); } -static uint32_t GetCodeItemSize(const DexFile& dex_file, const DexFile::CodeItem& disk_code_item) { +static uint32_t GetCodeItemSize(const DexFile::CodeItem& disk_code_item) { uintptr_t code_item_start = reinterpret_cast<uintptr_t>(&disk_code_item); uint32_t insns_size = disk_code_item.insns_size_in_code_units_; uint32_t tries_size = disk_code_item.tries_size_; @@ -675,7 +675,7 @@ CodeItem* Collections::CreateCodeItem(const DexFile& dex_file, } } - uint32_t size = GetCodeItemSize(dex_file, disk_code_item); + uint32_t size = GetCodeItemSize(disk_code_item); CodeItem* code_item = new CodeItem( registers_size, ins_size, outs_size, debug_info, insns_size, insns, tries, handler_list); code_item->SetSize(size); diff --git a/dexlayout/dex_ir.h b/dexlayout/dex_ir.h index 96afb906c7..78ddde84a1 100644 --- a/dexlayout/dex_ir.h +++ b/dexlayout/dex_ir.h @@ -748,8 +748,7 @@ class ClassDef : public IndexedItem { const TypeId* ClassType() const { return class_type_; } uint32_t GetAccessFlags() const { return access_flags_; } const TypeId* Superclass() const { return superclass_; } - 
const TypeIdVector* Interfaces() - { return interfaces_ == nullptr ? nullptr : interfaces_->GetTypeList(); } + const TypeList* Interfaces() { return interfaces_; } uint32_t InterfacesOffset() { return interfaces_ == nullptr ? 0 : interfaces_->GetOffset(); } const StringId* SourceFile() const { return source_file_; } AnnotationsDirectoryItem* Annotations() const { return annotations_; } @@ -781,7 +780,7 @@ class TypeAddrPair { uint32_t GetAddress() const { return address_; } private: - const TypeId* type_id_; + const TypeId* type_id_; // This can be nullptr. uint32_t address_; DISALLOW_COPY_AND_ASSIGN(TypeAddrPair); diff --git a/dexlayout/dex_verify.cc b/dexlayout/dex_verify.cc new file mode 100644 index 0000000000..54581292ff --- /dev/null +++ b/dexlayout/dex_verify.cc @@ -0,0 +1,1120 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Implementation file of dex ir verifier. + * + * Compares two dex files at the IR level, allowing differences in layout, but not in data. 
+ */ + +#include "dex_verify.h" + +#include <inttypes.h> + +#include "android-base/stringprintf.h" + +namespace art { + +using android::base::StringPrintf; + +bool VerifyOutputDexFile(dex_ir::Header* orig_header, + dex_ir::Header* output_header, + std::string* error_msg) { + dex_ir::Collections& orig = orig_header->GetCollections(); + dex_ir::Collections& output = output_header->GetCollections(); + + // Compare all id sections. They have a defined order that can't be changed by dexlayout. + if (!VerifyIds(orig.StringIds(), output.StringIds(), "string ids", error_msg) || + !VerifyIds(orig.TypeIds(), output.TypeIds(), "type ids", error_msg) || + !VerifyIds(orig.ProtoIds(), output.ProtoIds(), "proto ids", error_msg) || + !VerifyIds(orig.FieldIds(), output.FieldIds(), "field ids", error_msg) || + !VerifyIds(orig.MethodIds(), output.MethodIds(), "method ids", error_msg)) { + return false; + } + // Compare class defs. The order may have been changed by dexlayout. + if (!VerifyClassDefs(orig.ClassDefs(), output.ClassDefs(), error_msg)) { + return false; + } + return true; +} + +template<class T> bool VerifyIds(std::vector<std::unique_ptr<T>>& orig, + std::vector<std::unique_ptr<T>>& output, + const char* section_name, + std::string* error_msg) { + if (orig.size() != output.size()) { + *error_msg = StringPrintf( + "Mismatched size for %s section: %zu vs %zu.", section_name, orig.size(), output.size()); + return false; + } + for (size_t i = 0; i < orig.size(); ++i) { + if (!VerifyId(orig[i].get(), output[i].get(), error_msg)) { + return false; + } + } + return true; +} + +bool VerifyId(dex_ir::StringId* orig, dex_ir::StringId* output, std::string* error_msg) { + if (strcmp(orig->Data(), output->Data()) != 0) { + *error_msg = StringPrintf("Mismatched string data for string id %u at offset %x: %s vs %s.", + orig->GetIndex(), + orig->GetOffset(), + orig->Data(), + output->Data()); + return false; + } + return true; +} + +bool VerifyId(dex_ir::TypeId* orig, dex_ir::TypeId* 
output, std::string* error_msg) { + if (orig->GetStringId()->GetIndex() != output->GetStringId()->GetIndex()) { + *error_msg = StringPrintf("Mismatched string index for type id %u at offset %x: %u vs %u.", + orig->GetIndex(), + orig->GetOffset(), + orig->GetStringId()->GetIndex(), + output->GetStringId()->GetIndex()); + return false; + } + return true; +} + +bool VerifyId(dex_ir::ProtoId* orig, dex_ir::ProtoId* output, std::string* error_msg) { + if (orig->Shorty()->GetIndex() != output->Shorty()->GetIndex()) { + *error_msg = StringPrintf("Mismatched string index for proto id %u at offset %x: %u vs %u.", + orig->GetIndex(), + orig->GetOffset(), + orig->Shorty()->GetIndex(), + output->Shorty()->GetIndex()); + return false; + } + if (orig->ReturnType()->GetIndex() != output->ReturnType()->GetIndex()) { + *error_msg = StringPrintf("Mismatched type index for proto id %u at offset %x: %u vs %u.", + orig->GetIndex(), + orig->GetOffset(), + orig->ReturnType()->GetIndex(), + output->ReturnType()->GetIndex()); + return false; + } + if (!VerifyTypeList(orig->Parameters(), output->Parameters())) { + *error_msg = StringPrintf("Mismatched type list for proto id %u at offset %x.", + orig->GetIndex(), + orig->GetOffset()); + } + return true; +} + +bool VerifyId(dex_ir::FieldId* orig, dex_ir::FieldId* output, std::string* error_msg) { + if (orig->Class()->GetIndex() != output->Class()->GetIndex()) { + *error_msg = + StringPrintf("Mismatched class type index for field id %u at offset %x: %u vs %u.", + orig->GetIndex(), + orig->GetOffset(), + orig->Class()->GetIndex(), + output->Class()->GetIndex()); + return false; + } + if (orig->Type()->GetIndex() != output->Type()->GetIndex()) { + *error_msg = StringPrintf("Mismatched type index for field id %u at offset %x: %u vs %u.", + orig->GetIndex(), + orig->GetOffset(), + orig->Class()->GetIndex(), + output->Class()->GetIndex()); + return false; + } + if (orig->Name()->GetIndex() != output->Name()->GetIndex()) { + *error_msg = 
StringPrintf("Mismatched string index for field id %u at offset %x: %u vs %u.", + orig->GetIndex(), + orig->GetOffset(), + orig->Name()->GetIndex(), + output->Name()->GetIndex()); + return false; + } + return true; +} + +bool VerifyId(dex_ir::MethodId* orig, dex_ir::MethodId* output, std::string* error_msg) { + if (orig->Class()->GetIndex() != output->Class()->GetIndex()) { + *error_msg = StringPrintf("Mismatched type index for method id %u at offset %x: %u vs %u.", + orig->GetIndex(), + orig->GetOffset(), + orig->Class()->GetIndex(), + output->Class()->GetIndex()); + return false; + } + if (orig->Proto()->GetIndex() != output->Proto()->GetIndex()) { + *error_msg = StringPrintf("Mismatched proto index for method id %u at offset %x: %u vs %u.", + orig->GetIndex(), + orig->GetOffset(), + orig->Class()->GetIndex(), + output->Class()->GetIndex()); + return false; + } + if (orig->Name()->GetIndex() != output->Name()->GetIndex()) { + *error_msg = + StringPrintf("Mismatched string index for method id %u at offset %x: %u vs %u.", + orig->GetIndex(), + orig->GetOffset(), + orig->Name()->GetIndex(), + output->Name()->GetIndex()); + return false; + } + return true; +} + +struct ClassDefCompare { + bool operator()(dex_ir::ClassDef* lhs, dex_ir::ClassDef* rhs) const { + return lhs->ClassType()->GetIndex() < rhs->ClassType()->GetIndex(); + } +}; + +// The class defs may have a new order due to dexlayout. Use the class's class_idx to uniquely +// identify them and sort them for comparison. +bool VerifyClassDefs(std::vector<std::unique_ptr<dex_ir::ClassDef>>& orig, + std::vector<std::unique_ptr<dex_ir::ClassDef>>& output, + std::string* error_msg) { + if (orig.size() != output.size()) { + *error_msg = StringPrintf( + "Mismatched size for class defs section: %zu vs %zu.", orig.size(), output.size()); + return false; + } + // Store the class defs into sets sorted by the class's type index. 
+ std::set<dex_ir::ClassDef*, ClassDefCompare> orig_set; + std::set<dex_ir::ClassDef*, ClassDefCompare> output_set; + for (size_t i = 0; i < orig.size(); ++i) { + orig_set.insert(orig[i].get()); + output_set.insert(output[i].get()); + } + auto orig_iter = orig_set.begin(); + auto output_iter = output_set.begin(); + while (orig_iter != orig_set.end() && output_iter != output_set.end()) { + if (!VerifyClassDef(*orig_iter, *output_iter, error_msg)) { + return false; + } + orig_iter++; + output_iter++; + } + return true; +} + +bool VerifyClassDef(dex_ir::ClassDef* orig, dex_ir::ClassDef* output, std::string* error_msg) { + if (orig->ClassType()->GetIndex() != output->ClassType()->GetIndex()) { + *error_msg = + StringPrintf("Mismatched class type index for class def %u at offset %x: %u vs %u.", + orig->GetIndex(), + orig->GetOffset(), + orig->ClassType()->GetIndex(), + output->ClassType()->GetIndex()); + return false; + } + if (orig->GetAccessFlags() != output->GetAccessFlags()) { + *error_msg = + StringPrintf("Mismatched access flags for class def %u at offset %x: %x vs %x.", + orig->GetIndex(), + orig->GetOffset(), + orig->GetAccessFlags(), + output->GetAccessFlags()); + return false; + } + uint32_t orig_super = orig->Superclass() == nullptr ? 0 : orig->Superclass()->GetIndex(); + uint32_t output_super = output->Superclass() == nullptr ? 0 : output->Superclass()->GetIndex(); + if (orig_super != output_super) { + *error_msg = + StringPrintf("Mismatched super class for class def %u at offset %x: %u vs %u.", + orig->GetIndex(), + orig->GetOffset(), + orig_super, + output_super); + return false; + } + if (!VerifyTypeList(orig->Interfaces(), output->Interfaces())) { + *error_msg = StringPrintf("Mismatched type list for class def %u at offset %x.", + orig->GetIndex(), + orig->GetOffset()); + return false; + } + const char* orig_source = orig->SourceFile() == nullptr ? "" : orig->SourceFile()->Data(); + const char* output_source = output->SourceFile() == nullptr ? 
"" : output->SourceFile()->Data(); + if (strcmp(orig_source, output_source) != 0) { + *error_msg = StringPrintf("Mismatched source file for class def %u at offset %x: %s vs %s.", + orig->GetIndex(), + orig->GetOffset(), + orig_source, + output_source); + return false; + } + if (!VerifyAnnotationsDirectory(orig->Annotations(), output->Annotations(), error_msg)) { + return false; + } + if (!VerifyClassData(orig->GetClassData(), output->GetClassData(), error_msg)) { + return false; + } + return VerifyEncodedArray(orig->StaticValues(), output->StaticValues(), error_msg); +} + +bool VerifyTypeList(const dex_ir::TypeList* orig, const dex_ir::TypeList* output) { + if (orig == nullptr || output == nullptr) { + return orig == output; + } + const dex_ir::TypeIdVector* orig_list = orig->GetTypeList(); + const dex_ir::TypeIdVector* output_list = output->GetTypeList(); + if (orig_list->size() != output_list->size()) { + return false; + } + for (size_t i = 0; i < orig_list->size(); ++i) { + if ((*orig_list)[i]->GetIndex() != (*output_list)[i]->GetIndex()) { + return false; + } + } + return true; +} + +bool VerifyAnnotationsDirectory(dex_ir::AnnotationsDirectoryItem* orig, + dex_ir::AnnotationsDirectoryItem* output, + std::string* error_msg) { + if (orig == nullptr || output == nullptr) { + if (orig != output) { + *error_msg = "Found unexpected empty annotations directory."; + return false; + } + return true; + } + if (!VerifyAnnotationSet(orig->GetClassAnnotation(), output->GetClassAnnotation(), error_msg)) { + return false; + } + if (!VerifyFieldAnnotations(orig->GetFieldAnnotations(), + output->GetFieldAnnotations(), + orig->GetOffset(), + error_msg)) { + return false; + } + if (!VerifyMethodAnnotations(orig->GetMethodAnnotations(), + output->GetMethodAnnotations(), + orig->GetOffset(), + error_msg)) { + return false; + } + return VerifyParameterAnnotations(orig->GetParameterAnnotations(), + output->GetParameterAnnotations(), + orig->GetOffset(), + error_msg); +} + +bool 
VerifyFieldAnnotations(dex_ir::FieldAnnotationVector* orig, + dex_ir::FieldAnnotationVector* output, + uint32_t orig_offset, + std::string* error_msg) { + if (orig == nullptr || output == nullptr) { + if (orig != output) { + *error_msg = StringPrintf( + "Found unexpected empty field annotations for annotations directory at offset %x.", + orig_offset); + return false; + } + return true; + } + if (orig->size() != output->size()) { + *error_msg = StringPrintf( + "Mismatched field annotations size for annotations directory at offset %x: %zu vs %zu.", + orig_offset, + orig->size(), + output->size()); + return false; + } + for (size_t i = 0; i < orig->size(); ++i) { + dex_ir::FieldAnnotation* orig_field = (*orig)[i].get(); + dex_ir::FieldAnnotation* output_field = (*output)[i].get(); + if (orig_field->GetFieldId()->GetIndex() != output_field->GetFieldId()->GetIndex()) { + *error_msg = StringPrintf( + "Mismatched field annotation index for annotations directory at offset %x: %u vs %u.", + orig_offset, + orig_field->GetFieldId()->GetIndex(), + output_field->GetFieldId()->GetIndex()); + return false; + } + if (!VerifyAnnotationSet(orig_field->GetAnnotationSetItem(), + output_field->GetAnnotationSetItem(), + error_msg)) { + return false; + } + } + return true; +} + +bool VerifyMethodAnnotations(dex_ir::MethodAnnotationVector* orig, + dex_ir::MethodAnnotationVector* output, + uint32_t orig_offset, + std::string* error_msg) { + if (orig == nullptr || output == nullptr) { + if (orig != output) { + *error_msg = StringPrintf( + "Found unexpected empty method annotations for annotations directory at offset %x.", + orig_offset); + return false; + } + return true; + } + if (orig->size() != output->size()) { + *error_msg = StringPrintf( + "Mismatched method annotations size for annotations directory at offset %x: %zu vs %zu.", + orig_offset, + orig->size(), + output->size()); + return false; + } + for (size_t i = 0; i < orig->size(); ++i) { + dex_ir::MethodAnnotation* orig_method = 
(*orig)[i].get(); + dex_ir::MethodAnnotation* output_method = (*output)[i].get(); + if (orig_method->GetMethodId()->GetIndex() != output_method->GetMethodId()->GetIndex()) { + *error_msg = StringPrintf( + "Mismatched method annotation index for annotations directory at offset %x: %u vs %u.", + orig_offset, + orig_method->GetMethodId()->GetIndex(), + output_method->GetMethodId()->GetIndex()); + return false; + } + if (!VerifyAnnotationSet(orig_method->GetAnnotationSetItem(), + output_method->GetAnnotationSetItem(), + error_msg)) { + return false; + } + } + return true; +} + +bool VerifyParameterAnnotations(dex_ir::ParameterAnnotationVector* orig, + dex_ir::ParameterAnnotationVector* output, + uint32_t orig_offset, + std::string* error_msg) { + if (orig == nullptr || output == nullptr) { + if (orig != output) { + *error_msg = StringPrintf( + "Found unexpected empty parameter annotations for annotations directory at offset %x.", + orig_offset); + return false; + } + return true; + } + if (orig->size() != output->size()) { + *error_msg = StringPrintf( + "Mismatched parameter annotations size for annotations directory at offset %x: %zu vs %zu.", + orig_offset, + orig->size(), + output->size()); + return false; + } + for (size_t i = 0; i < orig->size(); ++i) { + dex_ir::ParameterAnnotation* orig_param = (*orig)[i].get(); + dex_ir::ParameterAnnotation* output_param = (*output)[i].get(); + if (orig_param->GetMethodId()->GetIndex() != output_param->GetMethodId()->GetIndex()) { + *error_msg = StringPrintf( + "Mismatched parameter annotation index for annotations directory at offset %x: %u vs %u.", + orig_offset, + orig_param->GetMethodId()->GetIndex(), + output_param->GetMethodId()->GetIndex()); + return false; + } + if (!VerifyAnnotationSetRefList(orig_param->GetAnnotations(), + output_param->GetAnnotations(), + error_msg)) { + return false; + } + } + return true; +} + +bool VerifyAnnotationSetRefList(dex_ir::AnnotationSetRefList* orig, + dex_ir::AnnotationSetRefList* 
output, + std::string* error_msg) { + std::vector<dex_ir::AnnotationSetItem*>* orig_items = orig->GetItems(); + std::vector<dex_ir::AnnotationSetItem*>* output_items = output->GetItems(); + if (orig_items->size() != output_items->size()) { + *error_msg = StringPrintf( + "Mismatched annotation set ref list size at offset %x: %zu vs %zu.", + orig->GetOffset(), + orig_items->size(), + output_items->size()); + return false; + } + for (size_t i = 0; i < orig_items->size(); ++i) { + if (!VerifyAnnotationSet((*orig_items)[i], (*output_items)[i], error_msg)) { + return false; + } + } + return true; +} + +bool VerifyAnnotationSet(dex_ir::AnnotationSetItem* orig, + dex_ir::AnnotationSetItem* output, + std::string* error_msg) { + if (orig == nullptr || output == nullptr) { + if (orig != output) { + *error_msg = "Found unexpected empty annotation set."; + return false; + } + return true; + } + std::vector<dex_ir::AnnotationItem*>* orig_items = orig->GetItems(); + std::vector<dex_ir::AnnotationItem*>* output_items = output->GetItems(); + if (orig_items->size() != output_items->size()) { + *error_msg = StringPrintf("Mismatched size for annotation set at offset %x: %zu vs %zu.", + orig->GetOffset(), + orig_items->size(), + output_items->size()); + return false; + } + for (size_t i = 0; i < orig_items->size(); ++i) { + if (!VerifyAnnotation((*orig_items)[i], (*output_items)[i], error_msg)) { + return false; + } + } + return true; +} + +bool VerifyAnnotation(dex_ir::AnnotationItem* orig, + dex_ir::AnnotationItem* output, + std::string* error_msg) { + if (orig->GetVisibility() != output->GetVisibility()) { + *error_msg = StringPrintf("Mismatched visibility for annotation at offset %x: %u vs %u.", + orig->GetOffset(), + orig->GetVisibility(), + output->GetVisibility()); + return false; + } + return VerifyEncodedAnnotation(orig->GetAnnotation(), + output->GetAnnotation(), + orig->GetOffset(), + error_msg); +} + +bool VerifyEncodedAnnotation(dex_ir::EncodedAnnotation* orig, + 
dex_ir::EncodedAnnotation* output, + uint32_t orig_offset, + std::string* error_msg) { + if (orig->GetType()->GetIndex() != output->GetType()->GetIndex()) { + *error_msg = StringPrintf( + "Mismatched encoded annotation type for annotation at offset %x: %u vs %u.", + orig_offset, + orig->GetType()->GetIndex(), + output->GetType()->GetIndex()); + return false; + } + dex_ir::AnnotationElementVector* orig_elements = orig->GetAnnotationElements(); + dex_ir::AnnotationElementVector* output_elements = output->GetAnnotationElements(); + if (orig_elements->size() != output_elements->size()) { + *error_msg = StringPrintf( + "Mismatched encoded annotation size for annotation at offset %x: %zu vs %zu.", + orig_offset, + orig_elements->size(), + output_elements->size()); + return false; + } + for (size_t i = 0; i < orig_elements->size(); ++i) { + if (!VerifyAnnotationElement((*orig_elements)[i].get(), + (*output_elements)[i].get(), + orig_offset, + error_msg)) { + return false; + } + } + return true; +} + +bool VerifyAnnotationElement(dex_ir::AnnotationElement* orig, + dex_ir::AnnotationElement* output, + uint32_t orig_offset, + std::string* error_msg) { + if (orig->GetName()->GetIndex() != output->GetName()->GetIndex()) { + *error_msg = StringPrintf( + "Mismatched annotation element name for annotation at offset %x: %u vs %u.", + orig_offset, + orig->GetName()->GetIndex(), + output->GetName()->GetIndex()); + return false; + } + return VerifyEncodedValue(orig->GetValue(), output->GetValue(), orig_offset, error_msg); +} + +bool VerifyEncodedValue(dex_ir::EncodedValue* orig, + dex_ir::EncodedValue* output, + uint32_t orig_offset, + std::string* error_msg) { + if (orig->Type() != output->Type()) { + *error_msg = StringPrintf( + "Mismatched encoded value type for annotation or encoded array at offset %x: %d vs %d.", + orig_offset, + orig->Type(), + output->Type()); + return false; + } + switch (orig->Type()) { + case DexFile::kDexAnnotationByte: + if (orig->GetByte() != 
output->GetByte()) { + *error_msg = StringPrintf("Mismatched encoded byte for annotation at offset %x: %d vs %d.", + orig_offset, + orig->GetByte(), + output->GetByte()); + return false; + } + break; + case DexFile::kDexAnnotationShort: + if (orig->GetShort() != output->GetShort()) { + *error_msg = StringPrintf("Mismatched encoded short for annotation at offset %x: %d vs %d.", + orig_offset, + orig->GetShort(), + output->GetShort()); + return false; + } + break; + case DexFile::kDexAnnotationChar: + if (orig->GetChar() != output->GetChar()) { + *error_msg = StringPrintf("Mismatched encoded char for annotation at offset %x: %c vs %c.", + orig_offset, + orig->GetChar(), + output->GetChar()); + return false; + } + break; + case DexFile::kDexAnnotationInt: + if (orig->GetInt() != output->GetInt()) { + *error_msg = StringPrintf("Mismatched encoded int for annotation at offset %x: %d vs %d.", + orig_offset, + orig->GetInt(), + output->GetInt()); + return false; + } + break; + case DexFile::kDexAnnotationLong: + if (orig->GetLong() != output->GetLong()) { + *error_msg = StringPrintf( + "Mismatched encoded long for annotation at offset %x: %" PRId64 " vs %" PRId64 ".", + orig_offset, + orig->GetLong(), + output->GetLong()); + return false; + } + break; + case DexFile::kDexAnnotationFloat: + // The float value is encoded, so compare as if it's an int. + if (orig->GetInt() != output->GetInt()) { + *error_msg = StringPrintf( + "Mismatched encoded float for annotation at offset %x: %x (encoded) vs %x (encoded).", + orig_offset, + orig->GetInt(), + output->GetInt()); + return false; + } + break; + case DexFile::kDexAnnotationDouble: + // The double value is encoded, so compare as if it's a long. 
+ if (orig->GetLong() != output->GetLong()) { + *error_msg = StringPrintf( + "Mismatched encoded double for annotation at offset %x: %" PRIx64 + " (encoded) vs %" PRIx64 " (encoded).", + orig_offset, + orig->GetLong(), + output->GetLong()); + return false; + } + break; + case DexFile::kDexAnnotationString: + if (orig->GetStringId()->GetIndex() != output->GetStringId()->GetIndex()) { + *error_msg = StringPrintf( + "Mismatched encoded string for annotation at offset %x: %s vs %s.", + orig_offset, + orig->GetStringId()->Data(), + output->GetStringId()->Data()); + return false; + } + break; + case DexFile::kDexAnnotationType: + if (orig->GetTypeId()->GetIndex() != output->GetTypeId()->GetIndex()) { + *error_msg = StringPrintf("Mismatched encoded type for annotation at offset %x: %u vs %u.", + orig_offset, + orig->GetTypeId()->GetIndex(), + output->GetTypeId()->GetIndex()); + return false; + } + break; + case DexFile::kDexAnnotationField: + case DexFile::kDexAnnotationEnum: + if (orig->GetFieldId()->GetIndex() != output->GetFieldId()->GetIndex()) { + *error_msg = StringPrintf("Mismatched encoded field for annotation at offset %x: %u vs %u.", + orig_offset, + orig->GetFieldId()->GetIndex(), + output->GetFieldId()->GetIndex()); + return false; + } + break; + case DexFile::kDexAnnotationMethod: + if (orig->GetMethodId()->GetIndex() != output->GetMethodId()->GetIndex()) { + *error_msg = StringPrintf( + "Mismatched encoded method for annotation at offset %x: %u vs %u.", + orig_offset, + orig->GetMethodId()->GetIndex(), + output->GetMethodId()->GetIndex()); + return false; + } + break; + case DexFile::kDexAnnotationArray: + if (!VerifyEncodedArray(orig->GetEncodedArray(), output->GetEncodedArray(), error_msg)) { + return false; + } + break; + case DexFile::kDexAnnotationAnnotation: + if (!VerifyEncodedAnnotation(orig->GetEncodedAnnotation(), + output->GetEncodedAnnotation(), + orig_offset, + error_msg)) { + return false; + } + break; + case DexFile::kDexAnnotationNull: + 
break; + case DexFile::kDexAnnotationBoolean: + if (orig->GetBoolean() != output->GetBoolean()) { + *error_msg = StringPrintf( + "Mismatched encoded boolean for annotation at offset %x: %d vs %d.", + orig_offset, + orig->GetBoolean(), + output->GetBoolean()); + return false; + } + break; + default: + break; + } + return true; +} + +bool VerifyEncodedArray(dex_ir::EncodedArrayItem* orig, + dex_ir::EncodedArrayItem* output, + std::string* error_msg) { + if (orig == nullptr || output == nullptr) { + if (orig != output) { + *error_msg = "Found unexpected empty encoded array."; + return false; + } + return true; + } + dex_ir::EncodedValueVector* orig_vector = orig->GetEncodedValues(); + dex_ir::EncodedValueVector* output_vector = output->GetEncodedValues(); + if (orig_vector->size() != output_vector->size()) { + *error_msg = StringPrintf("Mismatched size for encoded array at offset %x: %zu vs %zu.", + orig->GetOffset(), + orig_vector->size(), + output_vector->size()); + return false; + } + for (size_t i = 0; i < orig_vector->size(); ++i) { + if (!VerifyEncodedValue((*orig_vector)[i].get(), + (*output_vector)[i].get(), + orig->GetOffset(), + error_msg)) { + return false; + } + } + return true; +} + +bool VerifyClassData(dex_ir::ClassData* orig, dex_ir::ClassData* output, std::string* error_msg) { + if (orig == nullptr || output == nullptr) { + if (orig != output) { + *error_msg = "Found unexpected empty class data."; + return false; + } + return true; + } + if (!VerifyFields(orig->StaticFields(), output->StaticFields(), orig->GetOffset(), error_msg)) { + return false; + } + if (!VerifyFields(orig->InstanceFields(), + output->InstanceFields(), + orig->GetOffset(), + error_msg)) { + return false; + } + if (!VerifyMethods(orig->DirectMethods(), + output->DirectMethods(), + orig->GetOffset(), + error_msg)) { + return false; + } + return VerifyMethods(orig->VirtualMethods(), + output->VirtualMethods(), + orig->GetOffset(), + error_msg); +} + +bool 
VerifyFields(dex_ir::FieldItemVector* orig, + dex_ir::FieldItemVector* output, + uint32_t orig_offset, + std::string* error_msg) { + if (orig->size() != output->size()) { + *error_msg = StringPrintf("Mismatched fields size for class data at offset %x: %zu vs %zu.", + orig_offset, + orig->size(), + output->size()); + return false; + } + for (size_t i = 0; i < orig->size(); ++i) { + dex_ir::FieldItem* orig_field = (*orig)[i].get(); + dex_ir::FieldItem* output_field = (*output)[i].get(); + if (orig_field->GetFieldId()->GetIndex() != output_field->GetFieldId()->GetIndex()) { + *error_msg = StringPrintf("Mismatched field index for class data at offset %x: %u vs %u.", + orig_offset, + orig_field->GetFieldId()->GetIndex(), + output_field->GetFieldId()->GetIndex()); + return false; + } + if (orig_field->GetAccessFlags() != output_field->GetAccessFlags()) { + *error_msg = StringPrintf( + "Mismatched field access flags for class data at offset %x: %u vs %u.", + orig_offset, + orig_field->GetAccessFlags(), + output_field->GetAccessFlags()); + return false; + } + } + return true; +} + +bool VerifyMethods(dex_ir::MethodItemVector* orig, + dex_ir::MethodItemVector* output, + uint32_t orig_offset, + std::string* error_msg) { + if (orig->size() != output->size()) { + *error_msg = StringPrintf("Mismatched methods size for class data at offset %x: %zu vs %zu.", + orig_offset, + orig->size(), + output->size()); + return false; + } + for (size_t i = 0; i < orig->size(); ++i) { + dex_ir::MethodItem* orig_method = (*orig)[i].get(); + dex_ir::MethodItem* output_method = (*output)[i].get(); + if (orig_method->GetMethodId()->GetIndex() != output_method->GetMethodId()->GetIndex()) { + *error_msg = StringPrintf("Mismatched method index for class data at offset %x: %u vs %u.", + orig_offset, + orig_method->GetMethodId()->GetIndex(), + output_method->GetMethodId()->GetIndex()); + return false; + } + if (orig_method->GetAccessFlags() != output_method->GetAccessFlags()) { + *error_msg = 
StringPrintf( + "Mismatched method access flags for class data at offset %x: %u vs %u.", + orig_offset, + orig_method->GetAccessFlags(), + output_method->GetAccessFlags()); + return false; + } + if (!VerifyCode(orig_method->GetCodeItem(), output_method->GetCodeItem(), error_msg)) { + return false; + } + } + return true; +} + +bool VerifyCode(dex_ir::CodeItem* orig, dex_ir::CodeItem* output, std::string* error_msg) { + if (orig == nullptr || output == nullptr) { + if (orig != output) { + *error_msg = "Found unexpected empty code item."; + return false; + } + return true; + } + if (orig->RegistersSize() != output->RegistersSize()) { + *error_msg = StringPrintf("Mismatched registers size for code item at offset %x: %u vs %u.", + orig->GetOffset(), + orig->RegistersSize(), + output->RegistersSize()); + return false; + } + if (orig->InsSize() != output->InsSize()) { + *error_msg = StringPrintf("Mismatched ins size for code item at offset %x: %u vs %u.", + orig->GetOffset(), + orig->InsSize(), + output->InsSize()); + return false; + } + if (orig->OutsSize() != output->OutsSize()) { + *error_msg = StringPrintf("Mismatched outs size for code item at offset %x: %u vs %u.", + orig->GetOffset(), + orig->OutsSize(), + output->OutsSize()); + return false; + } + if (orig->TriesSize() != output->TriesSize()) { + *error_msg = StringPrintf("Mismatched tries size for code item at offset %x: %u vs %u.", + orig->GetOffset(), + orig->TriesSize(), + output->TriesSize()); + return false; + } + if (!VerifyDebugInfo(orig->DebugInfo(), output->DebugInfo(), error_msg)) { + return false; + } + if (orig->InsnsSize() != output->InsnsSize()) { + *error_msg = StringPrintf("Mismatched insns size for code item at offset %x: %u vs %u.", + orig->GetOffset(), + orig->InsnsSize(), + output->InsnsSize()); + return false; + } + if (memcmp(orig->Insns(), output->Insns(), orig->InsnsSize()) != 0) { + *error_msg = StringPrintf("Mismatched insns for code item at offset %x.", + orig->GetOffset()); + return 
false; + } + if (!VerifyTries(orig->Tries(), output->Tries(), orig->GetOffset(), error_msg)) { + return false; + } + return VerifyHandlers(orig->Handlers(), output->Handlers(), orig->GetOffset(), error_msg); +} + +bool VerifyDebugInfo(dex_ir::DebugInfoItem* orig, + dex_ir::DebugInfoItem* output, + std::string* error_msg) { + if (orig == nullptr || output == nullptr) { + if (orig != output) { + *error_msg = "Found unexpected empty debug info."; + return false; + } + return true; + } + if (!VerifyPositionInfo(orig->GetPositionInfo(), + output->GetPositionInfo(), + orig->GetOffset(), + error_msg)) { + return false; + } + return VerifyLocalInfo(orig->GetLocalInfo(), + output->GetLocalInfo(), + orig->GetOffset(), + error_msg); +} + +bool VerifyPositionInfo(dex_ir::PositionInfoVector& orig, + dex_ir::PositionInfoVector& output, + uint32_t orig_offset, + std::string* error_msg) { + if (orig.size() != output.size()) { + *error_msg = StringPrintf( + "Mismatched number of positions for debug info at offset %x: %zu vs %zu.", + orig_offset, + orig.size(), + output.size()); + return false; + } + for (size_t i = 0; i < orig.size(); ++i) { + if (orig[i]->address_ != output[i]->address_) { + *error_msg = StringPrintf( + "Mismatched position address for debug info at offset %x: %u vs %u.", + orig_offset, + orig[i]->address_, + output[i]->address_); + return false; + } + if (orig[i]->line_ != output[i]->line_) { + *error_msg = StringPrintf("Mismatched position line for debug info at offset %x: %u vs %u.", + orig_offset, + orig[i]->line_, + output[i]->line_); + return false; + } + } + return true; +} + +bool VerifyLocalInfo(dex_ir::LocalInfoVector& orig, + dex_ir::LocalInfoVector& output, + uint32_t orig_offset, + std::string* error_msg) { + if (orig.size() != output.size()) { + *error_msg = StringPrintf( + "Mismatched number of locals for debug info at offset %x: %zu vs %zu.", + orig_offset, + orig.size(), + output.size()); + return false; + } + for (size_t i = 0; i < orig.size(); 
++i) { + if (orig[i]->name_ != output[i]->name_) { + *error_msg = StringPrintf("Mismatched local name for debug info at offset %x: %s vs %s.", + orig_offset, + orig[i]->name_.c_str(), + output[i]->name_.c_str()); + return false; + } + if (orig[i]->descriptor_ != output[i]->descriptor_) { + *error_msg = StringPrintf( + "Mismatched local descriptor for debug info at offset %x: %s vs %s.", + orig_offset, + orig[i]->descriptor_.c_str(), + output[i]->descriptor_.c_str()); + return false; + } + if (orig[i]->signature_ != output[i]->signature_) { + *error_msg = StringPrintf("Mismatched local signature for debug info at offset %x: %s vs %s.", + orig_offset, + orig[i]->signature_.c_str(), + output[i]->signature_.c_str()); + return false; + } + if (orig[i]->start_address_ != output[i]->start_address_) { + *error_msg = StringPrintf( + "Mismatched local start address for debug info at offset %x: %u vs %u.", + orig_offset, + orig[i]->start_address_, + output[i]->start_address_); + return false; + } + if (orig[i]->end_address_ != output[i]->end_address_) { + *error_msg = StringPrintf( + "Mismatched local end address for debug info at offset %x: %u vs %u.", + orig_offset, + orig[i]->end_address_, + output[i]->end_address_); + return false; + } + if (orig[i]->reg_ != output[i]->reg_) { + *error_msg = StringPrintf("Mismatched local reg for debug info at offset %x: %u vs %u.", + orig_offset, + orig[i]->reg_, + output[i]->reg_); + return false; + } + } + return true; +} + +bool VerifyTries(dex_ir::TryItemVector* orig, + dex_ir::TryItemVector* output, + uint32_t orig_offset, + std::string* error_msg) { + if (orig == nullptr || output == nullptr) { + if (orig != output) { + *error_msg = "Found unexpected empty try items."; + return false; + } + return true; + } + if (orig->size() != output->size()) { + *error_msg = StringPrintf("Mismatched tries size for code item at offset %x: %zu vs %zu.", + orig_offset, + orig->size(), + output->size()); + return false; + } + for (size_t i = 0; i < 
orig->size(); ++i) { + const dex_ir::TryItem* orig_try = (*orig)[i].get(); + const dex_ir::TryItem* output_try = (*output)[i].get(); + if (orig_try->StartAddr() != output_try->StartAddr()) { + *error_msg = StringPrintf( + "Mismatched try item start addr for code item at offset %x: %u vs %u.", + orig_offset, + orig_try->StartAddr(), + output_try->StartAddr()); + return false; + } + if (orig_try->InsnCount() != output_try->InsnCount()) { + *error_msg = StringPrintf( + "Mismatched try item insn count for code item at offset %x: %u vs %u.", + orig_offset, + orig_try->InsnCount(), + output_try->InsnCount()); + return false; + } + if (!VerifyHandler(orig_try->GetHandlers(), + output_try->GetHandlers(), + orig_offset, + error_msg)) { + return false; + } + } + return true; +} + +bool VerifyHandlers(dex_ir::CatchHandlerVector* orig, + dex_ir::CatchHandlerVector* output, + uint32_t orig_offset, + std::string* error_msg) { + if (orig == nullptr || output == nullptr) { + if (orig != output) { + *error_msg = "Found unexpected empty catch handlers."; + return false; + } + return true; + } + if (orig->size() != output->size()) { + *error_msg = StringPrintf( + "Mismatched catch handlers size for code item at offset %x: %zu vs %zu.", + orig_offset, + orig->size(), + output->size()); + return false; + } + for (size_t i = 0; i < orig->size(); ++i) { + if (!VerifyHandler((*orig)[i].get(), (*output)[i].get(), orig_offset, error_msg)) { + return false; + } + } + return true; +} + +bool VerifyHandler(const dex_ir::CatchHandler* orig, + const dex_ir::CatchHandler* output, + uint32_t orig_offset, + std::string* error_msg) { + dex_ir::TypeAddrPairVector* orig_handlers = orig->GetHandlers(); + dex_ir::TypeAddrPairVector* output_handlers = output->GetHandlers(); + if (orig_handlers->size() != output_handlers->size()) { + *error_msg = StringPrintf( + "Mismatched number of catch handlers for code item at offset %x: %zu vs %zu.", + orig_offset, + orig_handlers->size(), + 
output_handlers->size()); + return false; + } + for (size_t i = 0; i < orig_handlers->size(); ++i) { + const dex_ir::TypeAddrPair* orig_handler = (*orig_handlers)[i].get(); + const dex_ir::TypeAddrPair* output_handler = (*output_handlers)[i].get(); + if (orig_handler->GetTypeId() == nullptr || output_handler->GetTypeId() == nullptr) { + if (orig_handler->GetTypeId() != output_handler->GetTypeId()) { + *error_msg = StringPrintf( + "Found unexpected catch all catch handler for code item at offset %x.", + orig_offset); + return false; + } + } else if (orig_handler->GetTypeId()->GetIndex() != output_handler->GetTypeId()->GetIndex()) { + *error_msg = StringPrintf( + "Mismatched catch handler type for code item at offset %x: %u vs %u.", + orig_offset, + orig_handler->GetTypeId()->GetIndex(), + output_handler->GetTypeId()->GetIndex()); + return false; + } + if (orig_handler->GetAddress() != output_handler->GetAddress()) { + *error_msg = StringPrintf( + "Mismatched catch handler address for code item at offset %x: %u vs %u.", + orig_offset, + orig_handler->GetAddress(), + output_handler->GetAddress()); + return false; + } + } + return true; +} + +} // namespace art diff --git a/dexlayout/dex_verify.h b/dexlayout/dex_verify.h new file mode 100644 index 0000000000..58c95d6947 --- /dev/null +++ b/dexlayout/dex_verify.h @@ -0,0 +1,125 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * Header file of dex ir verifier. + * + * Compares two dex files at the IR level, allowing differences in layout, but not in data. + */ + +#ifndef ART_DEXLAYOUT_DEX_VERIFY_H_ +#define ART_DEXLAYOUT_DEX_VERIFY_H_ + +#include "dex_ir.h" + +namespace art { +// Check that the output dex file contains the same data as the original. +// Compares the dex IR of both dex files. Allows the dex files to have different layouts. +bool VerifyOutputDexFile(dex_ir::Header* orig_header, + dex_ir::Header* output_header, + std::string* error_msg); + +template<class T> bool VerifyIds(std::vector<std::unique_ptr<T>>& orig, + std::vector<std::unique_ptr<T>>& output, + const char* section_name, + std::string* error_msg); +bool VerifyId(dex_ir::StringId* orig, dex_ir::StringId* output, std::string* error_msg); +bool VerifyId(dex_ir::TypeId* orig, dex_ir::TypeId* output, std::string* error_msg); +bool VerifyId(dex_ir::ProtoId* orig, dex_ir::ProtoId* output, std::string* error_msg); +bool VerifyId(dex_ir::FieldId* orig, dex_ir::FieldId* output, std::string* error_msg); +bool VerifyId(dex_ir::MethodId* orig, dex_ir::MethodId* output, std::string* error_msg); + +bool VerifyClassDefs(std::vector<std::unique_ptr<dex_ir::ClassDef>>& orig, + std::vector<std::unique_ptr<dex_ir::ClassDef>>& output, + std::string* error_msg); +bool VerifyClassDef(dex_ir::ClassDef* orig, dex_ir::ClassDef* output, std::string* error_msg); + +bool VerifyTypeList(const dex_ir::TypeList* orig, const dex_ir::TypeList* output); + +bool VerifyAnnotationsDirectory(dex_ir::AnnotationsDirectoryItem* orig, + dex_ir::AnnotationsDirectoryItem* output, + std::string* error_msg); +bool VerifyFieldAnnotations(dex_ir::FieldAnnotationVector* orig, + dex_ir::FieldAnnotationVector* output, + uint32_t orig_offset, + std::string* error_msg); +bool VerifyMethodAnnotations(dex_ir::MethodAnnotationVector* orig, + dex_ir::MethodAnnotationVector* output, + uint32_t orig_offset, + std::string* error_msg); +bool 
VerifyParameterAnnotations(dex_ir::ParameterAnnotationVector* orig, + dex_ir::ParameterAnnotationVector* output, + uint32_t orig_offset, + std::string* error_msg); +bool VerifyAnnotationSetRefList(dex_ir::AnnotationSetRefList* orig, + dex_ir::AnnotationSetRefList* output, + std::string* error_msg); +bool VerifyAnnotationSet(dex_ir::AnnotationSetItem* orig, + dex_ir::AnnotationSetItem* output, + std::string* error_msg); +bool VerifyAnnotation(dex_ir::AnnotationItem* orig, + dex_ir::AnnotationItem* output, + std::string* error_msg); +bool VerifyEncodedAnnotation(dex_ir::EncodedAnnotation* orig, + dex_ir::EncodedAnnotation* output, + uint32_t orig_offset, + std::string* error_msg); +bool VerifyAnnotationElement(dex_ir::AnnotationElement* orig, + dex_ir::AnnotationElement* output, + uint32_t orig_offset, + std::string* error_msg); +bool VerifyEncodedValue(dex_ir::EncodedValue* orig, + dex_ir::EncodedValue* output, + uint32_t orig_offset, + std::string* error_msg); +bool VerifyEncodedArray(dex_ir::EncodedArrayItem* orig, + dex_ir::EncodedArrayItem* output, + std::string* error_msg); + +bool VerifyClassData(dex_ir::ClassData* orig, dex_ir::ClassData* output, std::string* error_msg); +bool VerifyFields(dex_ir::FieldItemVector* orig, + dex_ir::FieldItemVector* output, + uint32_t orig_offset, + std::string* error_msg); +bool VerifyMethods(dex_ir::MethodItemVector* orig, + dex_ir::MethodItemVector* output, + uint32_t orig_offset, + std::string* error_msg); +bool VerifyCode(dex_ir::CodeItem* orig, dex_ir::CodeItem* output, std::string* error_msg); +bool VerifyDebugInfo(dex_ir::DebugInfoItem* orig, + dex_ir::DebugInfoItem* output, + std::string* error_msg); +bool VerifyPositionInfo(dex_ir::PositionInfoVector& orig, + dex_ir::PositionInfoVector& output, + uint32_t orig_offset, + std::string* error_msg); +bool VerifyLocalInfo(dex_ir::LocalInfoVector& orig, + dex_ir::LocalInfoVector& output, + uint32_t orig_offset, + std::string* error_msg); +bool 
VerifyTries(dex_ir::TryItemVector* orig, + dex_ir::TryItemVector* output, + uint32_t orig_offset, + std::string* error_msg); +bool VerifyHandlers(dex_ir::CatchHandlerVector* orig, + dex_ir::CatchHandlerVector* output, + uint32_t orig_offset, + std::string* error_msg); +bool VerifyHandler(const dex_ir::CatchHandler* orig, + const dex_ir::CatchHandler* output, + uint32_t orig_offset, + std::string* error_msg); +} // namespace art + +#endif // ART_DEXLAYOUT_DEX_VERIFY_H_ diff --git a/dexlayout/dex_visualize.cc b/dexlayout/dex_visualize.cc index 8997146c74..452f51b28b 100644 --- a/dexlayout/dex_visualize.cc +++ b/dexlayout/dex_visualize.cc @@ -35,9 +35,9 @@ namespace art { -std::string MultidexName(const std::string& prefix, - size_t dex_file_index, - const std::string& suffix) { +static std::string MultidexName(const std::string& prefix, + size_t dex_file_index, + const std::string& suffix) { return prefix + ((dex_file_index > 0) ? std::to_string(dex_file_index + 1) : "") + suffix; } @@ -432,20 +432,41 @@ void VisualizeDexLayout(dex_ir::Header* header, } // for } +static uint32_t FindNextByteAfterSection(dex_ir::Header* header, + const dex_ir::Collections& collections, + std::vector<const FileSection*>& sorted_sections, + size_t section_index) { + for (size_t i = section_index + 1; i < sorted_sections.size(); ++i) { + const FileSection* section = sorted_sections[i]; + if (section->size_fn_(collections) != 0) { + return section->offset_fn_(collections); + } + } + return header->FileSize(); +} + /* * Dumps the offset and size of sections within the file. */ void ShowDexSectionStatistics(dex_ir::Header* header, size_t dex_file_index) { // Compute the (multidex) class file name). 
- fprintf(stdout, "%s\n", MultidexName("classes", dex_file_index, ".dex").c_str()); - fprintf(stdout, "section offset items\n"); + fprintf(stdout, "%s (%d bytes)\n", + MultidexName("classes", dex_file_index, ".dex").c_str(), + header->FileSize()); + fprintf(stdout, "section offset items bytes pages pct\n"); const dex_ir::Collections& collections = header->GetCollections(); std::vector<const FileSection*> sorted_sections(GetSortedSections(collections, kSortAscending)); - for (const FileSection* file_section : sorted_sections) { - fprintf(stdout, "%-10s 0x%08x 0x%08x\n", - file_section->name_.c_str(), - file_section->offset_fn_(collections), - file_section->size_fn_(collections)); + for (size_t i = 0; i < sorted_sections.size(); ++i) { + const FileSection* file_section = sorted_sections[i]; + const char* name = file_section->name_.c_str(); + uint32_t offset = file_section->offset_fn_(collections); + uint32_t items = file_section->size_fn_(collections); + uint32_t bytes = 0; + if (items > 0) { + bytes = FindNextByteAfterSection(header, collections, sorted_sections, i) - offset; + } + fprintf(stdout, "%-10s %8d %8d %8d %8d %%%02d\n", name, offset, items, bytes, + (bytes + kPageSize - 1) / kPageSize, 100 * bytes / header->FileSize()); } fprintf(stdout, "\n"); } diff --git a/dexlayout/dexlayout.cc b/dexlayout/dexlayout.cc index f74fb4e80e..615bcf92ea 100644 --- a/dexlayout/dexlayout.cc +++ b/dexlayout/dexlayout.cc @@ -36,6 +36,7 @@ #include "dex_file-inl.h" #include "dex_file_verifier.h" #include "dex_instruction-inl.h" +#include "dex_verify.h" #include "dex_visualize.h" #include "dex_writer.h" #include "jit/profile_compilation_info.h" @@ -1368,10 +1369,11 @@ void DexLayout::DumpClass(int idx, char** last_package) { } // Interfaces. 
- const dex_ir::TypeIdVector* interfaces = class_def->Interfaces(); + const dex_ir::TypeList* interfaces = class_def->Interfaces(); if (interfaces != nullptr) { - for (uint32_t i = 0; i < interfaces->size(); i++) { - DumpInterface((*interfaces)[i], i); + const dex_ir::TypeIdVector* interfaces_vector = interfaces->GetTypeList(); + for (uint32_t i = 0; i < interfaces_vector->size(); i++) { + DumpInterface((*interfaces_vector)[i], i); } // for } @@ -1692,7 +1694,8 @@ void DexLayout::LayoutOutputFile(const DexFile* dex_file) { header_->SetFileSize(header_->FileSize() + diff); } -void DexLayout::OutputDexFile(const std::string& dex_file_location) { +void DexLayout::OutputDexFile(const DexFile* dex_file) { + const std::string& dex_file_location = dex_file->GetLocation(); std::string error_msg; std::unique_ptr<File> new_file; if (!options_.output_to_memmap_) { @@ -1725,18 +1728,24 @@ void DexLayout::OutputDexFile(const std::string& dex_file_location) { if (new_file != nullptr) { UNUSED(new_file->FlushCloseOrErase()); } - // Verify the output dex file is ok on debug builds. + // Verify the output dex file's structure for debug builds. if (kIsDebugBuild) { std::string location = "memory mapped file for " + dex_file_location; - std::unique_ptr<const DexFile> dex_file(DexFile::Open(mem_map_->Begin(), - mem_map_->Size(), - location, - header_->Checksum(), - /*oat_dex_file*/ nullptr, - /*verify*/ true, - /*verify_checksum*/ false, - &error_msg)); - DCHECK(dex_file != nullptr) << "Failed to re-open output file:" << error_msg; + std::unique_ptr<const DexFile> output_dex_file(DexFile::Open(mem_map_->Begin(), + mem_map_->Size(), + location, + header_->Checksum(), + /*oat_dex_file*/ nullptr, + /*verify*/ true, + /*verify_checksum*/ false, + &error_msg)); + DCHECK(output_dex_file != nullptr) << "Failed to re-open output file:" << error_msg; + } + // Do IR-level comparison between input and output. 
This check ignores potential differences + // due to layout, so offsets are not checked. Instead, it checks the data contents of each item. + if (options_.verify_output_) { + std::unique_ptr<dex_ir::Header> orig_header(dex_ir::DexIrBuilder(*dex_file)); + CHECK(VerifyOutputDexFile(orig_header.get(), header_, &error_msg)) << error_msg; } } @@ -1774,7 +1783,7 @@ void DexLayout::ProcessDexFile(const char* file_name, if (info_ != nullptr) { LayoutOutputFile(dex_file); } - OutputDexFile(dex_file->GetLocation()); + OutputDexFile(dex_file); } } diff --git a/dexlayout/dexlayout.h b/dexlayout/dexlayout.h index 74b525372e..f26b423847 100644 --- a/dexlayout/dexlayout.h +++ b/dexlayout/dexlayout.h @@ -58,6 +58,7 @@ class Options { bool show_section_headers_ = false; bool show_section_statistics_ = false; bool verbose_ = false; + bool verify_output_ = false; bool visualize_pattern_ = false; OutputFormat output_format_ = kOutputPlain; const char* output_dex_directory_ = nullptr; @@ -115,7 +116,7 @@ class DexLayout { // Creates a new layout for the dex file based on profile info. // Currently reorders ClassDefs, ClassDataItems, and CodeItems. 
void LayoutOutputFile(const DexFile* dex_file); - void OutputDexFile(const std::string& dex_file_location); + void OutputDexFile(const DexFile* dex_file); void DumpCFG(const DexFile* dex_file, int idx); void DumpCFG(const DexFile* dex_file, uint32_t dex_method_idx, const DexFile::CodeItem* code); diff --git a/dexlayout/dexlayout_main.cc b/dexlayout/dexlayout_main.cc index 3eac660eca..38faf9688b 100644 --- a/dexlayout/dexlayout_main.cc +++ b/dexlayout/dexlayout_main.cc @@ -1,4 +1,4 @@ -/* + /* * Copyright (C) 2016 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); @@ -44,7 +44,7 @@ static const char* kProgramName = "dexlayout"; static void Usage(void) { fprintf(stderr, "Copyright (C) 2016 The Android Open Source Project\n\n"); fprintf(stderr, "%s: [-a] [-c] [-d] [-e] [-f] [-h] [-i] [-l layout] [-o outfile] [-p profile]" - " [-s] [-w directory] dexfile...\n\n", kProgramName); + " [-s] [-t] [-v] [-w directory] dexfile...\n\n", kProgramName); fprintf(stderr, " -a : display annotations\n"); fprintf(stderr, " -b : build dex_ir\n"); fprintf(stderr, " -c : verify checksum and exit\n"); @@ -58,6 +58,7 @@ static void Usage(void) { fprintf(stderr, " -p : profile file name (defaults to no profile)\n"); fprintf(stderr, " -s : visualize reference pattern\n"); fprintf(stderr, " -t : display file section sizes\n"); + fprintf(stderr, " -v : verify output file is canonical to input (IR level comparison)\n"); fprintf(stderr, " -w : output dex directory \n"); } @@ -76,7 +77,7 @@ int DexlayoutDriver(int argc, char** argv) { // Parse all arguments. 
while (1) { - const int ic = getopt(argc, argv, "abcdefghil:mo:p:stw:"); + const int ic = getopt(argc, argv, "abcdefghil:mo:p:stvw:"); if (ic < 0) { break; // done } @@ -132,6 +133,9 @@ int DexlayoutDriver(int argc, char** argv) { options.show_section_statistics_ = true; options.verbose_ = false; break; + case 'v': // verify output + options.verify_output_ = true; + break; case 'w': // output dex files directory options.output_dex_directory_ = optarg; break; diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc index 1f6b87447f..fc6c18b1df 100644 --- a/disassembler/disassembler_mips.cc +++ b/disassembler/disassembler_mips.cc @@ -43,6 +43,7 @@ struct MipsInstruction { static const uint32_t kOpcodeShift = 26; static const uint32_t kCop1 = (17 << kOpcodeShift); +static const uint32_t kMsa = (30 << kOpcodeShift); // MSA major opcode. static const uint32_t kITypeMask = (0x3f << kOpcodeShift); static const uint32_t kJTypeMask = (0x3f << kOpcodeShift); @@ -51,6 +52,8 @@ static const uint32_t kSpecial0Mask = (0x3f << kOpcodeShift); static const uint32_t kSpecial2Mask = (0x3f << kOpcodeShift); static const uint32_t kSpecial3Mask = (0x3f << kOpcodeShift); static const uint32_t kFpMask = kRTypeMask; +static const uint32_t kMsaMask = kRTypeMask; +static const uint32_t kMsaSpecialMask = (0x3f << kOpcodeShift); static const MipsInstruction gMipsInstructions[] = { // "sll r0, r0, 0" is the canonical "nop", used in delay slots. @@ -417,6 +420,36 @@ static const MipsInstruction gMipsInstructions[] = { { kFpMask, kCop1 | 0x10, "sel", "fadt" }, { kFpMask, kCop1 | 0x1e, "max", "fadt" }, { kFpMask, kCop1 | 0x1c, "min", "fadt" }, + + // MSA instructions. 
+ { kMsaMask | (0x1f << 21), kMsa | (0x0 << 21) | 0x1e, "and.v", "kmn" }, + { kMsaMask | (0x1f << 21), kMsa | (0x1 << 21) | 0x1e, "or.v", "kmn" }, + { kMsaMask | (0x1f << 21), kMsa | (0x2 << 21) | 0x1e, "nor.v", "kmn" }, + { kMsaMask | (0x1f << 21), kMsa | (0x3 << 21) | 0x1e, "xor.v", "kmn" }, + { kMsaMask | (0x7 << 23), kMsa | (0x0 << 23) | 0xe, "addv", "Vkmn" }, + { kMsaMask | (0x7 << 23), kMsa | (0x1 << 23) | 0xe, "subv", "Vkmn" }, + { kMsaMask | (0x7 << 23), kMsa | (0x0 << 23) | 0x12, "mulv", "Vkmn" }, + { kMsaMask | (0x7 << 23), kMsa | (0x4 << 23) | 0x12, "div_s", "Vkmn" }, + { kMsaMask | (0x7 << 23), kMsa | (0x5 << 23) | 0x12, "div_u", "Vkmn" }, + { kMsaMask | (0x7 << 23), kMsa | (0x6 << 23) | 0x12, "mod_s", "Vkmn" }, + { kMsaMask | (0x7 << 23), kMsa | (0x7 << 23) | 0x12, "mod_u", "Vkmn" }, + { kMsaMask | (0xf << 22), kMsa | (0x0 << 22) | 0x1b, "fadd", "Ukmn" }, + { kMsaMask | (0xf << 22), kMsa | (0x1 << 22) | 0x1b, "fsub", "Ukmn" }, + { kMsaMask | (0xf << 22), kMsa | (0x2 << 22) | 0x1b, "fmul", "Ukmn" }, + { kMsaMask | (0xf << 22), kMsa | (0x3 << 22) | 0x1b, "fdiv", "Ukmn" }, + { kMsaMask | (0x1ff << 17), kMsa | (0x19e << 17) | 0x1e, "ffint_s", "ukm" }, + { kMsaMask | (0x1ff << 17), kMsa | (0x19c << 17) | 0x1e, "ftint_s", "ukm" }, + { kMsaMask | (0x7 << 23), kMsa | (0x0 << 23) | 0xd, "sll", "Vkmn" }, + { kMsaMask | (0x7 << 23), kMsa | (0x1 << 23) | 0xd, "sra", "Vkmn" }, + { kMsaMask | (0x7 << 23), kMsa | (0x2 << 23) | 0xd, "srl", "Vkmn" }, + { kMsaMask | (0x7 << 23), kMsa | (0x0 << 23) | 0x9, "slli", "kmW" }, + { kMsaMask | (0x7 << 23), kMsa | (0x1 << 23) | 0x9, "srai", "kmW" }, + { kMsaMask | (0x7 << 23), kMsa | (0x2 << 23) | 0x9, "srli", "kmW" }, + { kMsaMask | (0x3ff << 16), kMsa | (0xbe << 16) | 0x19, "move.v", "km" }, + { kMsaMask | (0xf << 22), kMsa | (0x1 << 22) | 0x19, "splati", "kX" }, + { kMsaMask | (0xff << 18), kMsa | (0xc0 << 18) | 0x1e, "fill", "vkD" }, + { kMsaSpecialMask | (0xf << 2), kMsa | (0x8 << 2), "ld", "kw" }, + { kMsaSpecialMask | 
(0xf << 2), kMsa | (0x9 << 2), "st", "kw" }, }; static uint32_t ReadU32(const uint8_t* ptr) { @@ -559,6 +592,111 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) { case 't': args << 'f' << rt; break; case 'Z': args << (rd + 1); break; // sz ([d]ext size). case 'z': args << (rd - sa + 1); break; // sz ([d]ins, dinsu size). + case 'k': args << 'w' << sa; break; + case 'm': args << 'w' << rd; break; + case 'n': args << 'w' << rt; break; + case 'U': // MSA 1-bit df (word/doubleword), position 21. + { + int32_t df = (instruction >> 21) & 0x1; + switch (df) { + case 0: opcode += ".w"; break; + case 1: opcode += ".d"; break; + } + continue; // No ", ". + } + case 'u': // MSA 1-bit df (word/doubleword), position 16. + { + int32_t df = (instruction >> 16) & 0x1; + switch (df) { + case 0: opcode += ".w"; break; + case 1: opcode += ".d"; break; + } + continue; // No ", ". + } + case 'V': // MSA 2-bit df, position 21. + { + int32_t df = (instruction >> 21) & 0x3; + switch (df) { + case 0: opcode += ".b"; break; + case 1: opcode += ".h"; break; + case 2: opcode += ".w"; break; + case 3: opcode += ".d"; break; + } + continue; // No ", ". + } + case 'v': // MSA 2-bit df, position 16. + { + int32_t df = (instruction >> 16) & 0x3; + switch (df) { + case 0: opcode += ".b"; break; + case 1: opcode += ".h"; break; + case 2: opcode += ".w"; break; + case 3: opcode += ".d"; break; + } + continue; // No ", ". + } + case 'W': // MSA df/m. + { + int32_t df_m = (instruction >> 16) & 0x7f; + if ((df_m & (0x1 << 6)) == 0) { + opcode += ".d"; + args << (df_m & 0x3f); + break; + } + if ((df_m & (0x1 << 5)) == 0) { + opcode += ".w"; + args << (df_m & 0x1f); + break; + } + if ((df_m & (0x1 << 4)) == 0) { + opcode += ".h"; + args << (df_m & 0xf); + break; + } + if ((df_m & (0x1 << 3)) == 0) { + opcode += ".b"; + args << (df_m & 0x7); + } + break; + } + case 'w': // MSA +x(rs). 
+ { + int32_t df = instruction & 0x3; + int32_t s10 = (instruction >> 16) & 0x3ff; + s10 -= (s10 & 0x200) << 1; // Sign-extend s10. + switch (df) { + case 0: opcode += ".b"; break; + case 1: opcode += ".h"; break; + case 2: opcode += ".w"; break; + case 3: opcode += ".d"; break; + } + args << StringPrintf("%+d(r%d)", s10 << df, rd); + break; + } + case 'X': // MSA df/n - ws[x]. + { + int32_t df_n = (instruction >> 16) & 0x3f; + if ((df_n & (0x3 << 4)) == 0) { + opcode += ".b"; + args << 'w' << rd << '[' << (df_n & 0xf) << ']'; + break; + } + if ((df_n & (0x3 << 3)) == 0) { + opcode += ".h"; + args << 'w' << rd << '[' << (df_n & 0x7) << ']'; + break; + } + if ((df_n & (0x3 << 2)) == 0) { + opcode += ".w"; + args << 'w' << rd << '[' << (df_n & 0x3) << ']'; + break; + } + if ((df_n & (0x3 << 1)) == 0) { + opcode += ".d"; + args << 'w' << rd << '[' << (df_n & 0x1) << ']'; + } + break; + } } if (*(args_fmt + 1)) { args << ", "; diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index e7670230e5..878d0f2cfe 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -596,7 +596,7 @@ class OatDumper { kByteKindStackMapInlineInfoIndex, kByteKindStackMapRegisterMaskIndex, kByteKindStackMapStackMaskIndex, - kByteKindInlineInfoMethodIndex, + kByteKindInlineInfoMethodIndexIdx, kByteKindInlineInfoDexPc, kByteKindInlineInfoExtraData, kByteKindInlineInfoDexRegisterMap, @@ -605,7 +605,7 @@ class OatDumper { // Special ranges for std::accumulate convenience. 
kByteKindStackMapFirst = kByteKindStackMapNativePc, kByteKindStackMapLast = kByteKindStackMapStackMaskIndex, - kByteKindInlineInfoFirst = kByteKindInlineInfoMethodIndex, + kByteKindInlineInfoFirst = kByteKindInlineInfoMethodIndexIdx, kByteKindInlineInfoLast = kByteKindInlineInfoIsLast, }; int64_t bits[kByteKindCount] = {}; @@ -685,8 +685,8 @@ class OatDumper { { ScopedIndentation indent1(&os); Dump(os, - "InlineInfoMethodIndex ", - bits[kByteKindInlineInfoMethodIndex], + "InlineInfoMethodIndexIdx ", + bits[kByteKindInlineInfoMethodIndexIdx], inline_info_bits, "inline info"); Dump(os, @@ -1363,7 +1363,8 @@ class OatDumper { CodeInfo code_info(raw_code_info); DCHECK(code_item != nullptr); ScopedIndentation indent1(vios); - DumpCodeInfo(vios, code_info, oat_method, *code_item); + MethodInfo method_info = oat_method.GetOatQuickMethodHeader()->GetOptimizedMethodInfo(); + DumpCodeInfo(vios, code_info, oat_method, *code_item, method_info); } } else if (IsMethodGeneratedByDexToDexCompiler(oat_method, code_item)) { // We don't encode the size in the table, so just emit that we have quickened @@ -1379,12 +1380,14 @@ class OatDumper { void DumpCodeInfo(VariableIndentationOutputStream* vios, const CodeInfo& code_info, const OatFile::OatMethod& oat_method, - const DexFile::CodeItem& code_item) { + const DexFile::CodeItem& code_item, + const MethodInfo& method_info) { code_info.Dump(vios, oat_method.GetCodeOffset(), code_item.registers_size_, options_.dump_code_info_stack_maps_, - instruction_set_); + instruction_set_, + method_info); } void DumpVregLocations(std::ostream& os, const OatFile::OatMethod& oat_method, @@ -1592,6 +1595,7 @@ class OatDumper { } else if (!bad_input && IsMethodGeneratedByOptimizingCompiler(oat_method, code_item)) { // The optimizing compiler outputs its CodeInfo data in the vmap table. 
StackMapsHelper helper(oat_method.GetVmapTable(), instruction_set_); + MethodInfo method_info(oat_method.GetOatQuickMethodHeader()->GetOptimizedMethodInfo()); { CodeInfoEncoding encoding(helper.GetEncoding()); StackMapEncoding stack_map_encoding(encoding.stack_map.encoding); @@ -1652,8 +1656,9 @@ class OatDumper { const size_t num_inline_infos = encoding.inline_info.num_entries; if (num_inline_infos > 0u) { stats_.AddBits( - Stats::kByteKindInlineInfoMethodIndex, - encoding.inline_info.encoding.GetMethodIndexEncoding().BitSize() * num_inline_infos); + Stats::kByteKindInlineInfoMethodIndexIdx, + encoding.inline_info.encoding.GetMethodIndexIdxEncoding().BitSize() * + num_inline_infos); stats_.AddBits( Stats::kByteKindInlineInfoDexPc, encoding.inline_info.encoding.GetDexPcEncoding().BitSize() * num_inline_infos); @@ -1679,6 +1684,7 @@ class OatDumper { stack_map.Dump(vios, helper.GetCodeInfo(), helper.GetEncoding(), + method_info, oat_method.GetCodeOffset(), code_item->registers_size_, instruction_set_); diff --git a/runtime/Android.bp b/runtime/Android.bp index d075c58d27..6c3bc0450b 100644 --- a/runtime/Android.bp +++ b/runtime/Android.bp @@ -149,7 +149,6 @@ cc_defaults { "native/dalvik_system_VMStack.cc", "native/dalvik_system_ZygoteHooks.cc", "native/java_lang_Class.cc", - "native/java_lang_DexCache.cc", "native/java_lang_Object.cc", "native/java_lang_String.cc", "native/java_lang_StringFactory.cc", diff --git a/runtime/arch/mips64/instruction_set_features_mips64.cc b/runtime/arch/mips64/instruction_set_features_mips64.cc index 5757906618..08d0bac2c3 100644 --- a/runtime/arch/mips64/instruction_set_features_mips64.cc +++ b/runtime/arch/mips64/instruction_set_features_mips64.cc @@ -30,22 +30,52 @@ using android::base::StringPrintf; Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromVariant( const std::string& variant, std::string* error_msg ATTRIBUTE_UNUSED) { + bool msa = true; if (variant != "default" && variant != "mips64r6") { LOG(WARNING) << "Unexpected 
CPU variant for Mips64 using defaults: " << variant; } - return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures()); + return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa)); } -Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromBitmap(uint32_t bitmap ATTRIBUTE_UNUSED) { - return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures()); +Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromBitmap(uint32_t bitmap) { + bool msa = (bitmap & kMsaBitfield) != 0; + return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa)); } Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCppDefines() { - return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures()); +#if defined(_MIPS_ARCH_MIPS64R6) + const bool msa = true; +#else + const bool msa = false; +#endif + return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa)); } Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromCpuInfo() { - return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures()); + // Look in /proc/cpuinfo for features we need. Only use this when we can guarantee that + // the kernel puts the appropriate feature flags in here. Sometimes it doesn't. 
+ bool msa = false; + + std::ifstream in("/proc/cpuinfo"); + if (!in.fail()) { + while (!in.eof()) { + std::string line; + std::getline(in, line); + if (!in.eof()) { + LOG(INFO) << "cpuinfo line: " << line; + if (line.find("ASEs") != std::string::npos) { + LOG(INFO) << "found Application Specific Extensions"; + if (line.find("msa") != std::string::npos) { + msa = true; + } + } + } + } + in.close(); + } else { + LOG(ERROR) << "Failed to open /proc/cpuinfo"; + } + return Mips64FeaturesUniquePtr(new Mips64InstructionSetFeatures(msa)); } Mips64FeaturesUniquePtr Mips64InstructionSetFeatures::FromHwcap() { @@ -62,28 +92,40 @@ bool Mips64InstructionSetFeatures::Equals(const InstructionSetFeatures* other) c if (kMips64 != other->GetInstructionSet()) { return false; } - return true; + const Mips64InstructionSetFeatures* other_as_mips64 = other->AsMips64InstructionSetFeatures(); + return msa_ == other_as_mips64->msa_; } uint32_t Mips64InstructionSetFeatures::AsBitmap() const { - return 0; + return (msa_ ? kMsaBitfield : 0); } std::string Mips64InstructionSetFeatures::GetFeatureString() const { - return "default"; + std::string result; + if (msa_) { + result += "msa"; + } else { + result += "-msa"; + } + return result; } std::unique_ptr<const InstructionSetFeatures> Mips64InstructionSetFeatures::AddFeaturesFromSplitString( const std::vector<std::string>& features, std::string* error_msg) const { - auto i = features.begin(); - if (i != features.end()) { - // We don't have any features. 
+ bool msa = msa_; + for (auto i = features.begin(); i != features.end(); i++) { std::string feature = android::base::Trim(*i); - *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str()); - return nullptr; + if (feature == "msa") { + msa = true; + } else if (feature == "-msa") { + msa = false; + } else { + *error_msg = StringPrintf("Unknown instruction set feature: '%s'", feature.c_str()); + return nullptr; + } } - return std::unique_ptr<const InstructionSetFeatures>(new Mips64InstructionSetFeatures()); + return std::unique_ptr<const InstructionSetFeatures>(new Mips64InstructionSetFeatures(msa)); } } // namespace art diff --git a/runtime/arch/mips64/instruction_set_features_mips64.h b/runtime/arch/mips64/instruction_set_features_mips64.h index c80c466dfc..d9f30c755e 100644 --- a/runtime/arch/mips64/instruction_set_features_mips64.h +++ b/runtime/arch/mips64/instruction_set_features_mips64.h @@ -58,6 +58,11 @@ class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures { std::string GetFeatureString() const OVERRIDE; + // Does it have MSA (MIPS SIMD Architecture) support. + bool HasMsa() const { + return msa_; + } + virtual ~Mips64InstructionSetFeatures() {} protected: @@ -67,9 +72,16 @@ class Mips64InstructionSetFeatures FINAL : public InstructionSetFeatures { std::string* error_msg) const OVERRIDE; private: - Mips64InstructionSetFeatures() : InstructionSetFeatures() { + explicit Mips64InstructionSetFeatures(bool msa) : InstructionSetFeatures(), msa_(msa) { } + // Bitmap positions for encoding features as a bitmap. 
+ enum { + kMsaBitfield = 1, + }; + + const bool msa_; + DISALLOW_COPY_AND_ASSIGN(Mips64InstructionSetFeatures); }; diff --git a/runtime/arch/mips64/instruction_set_features_mips64_test.cc b/runtime/arch/mips64/instruction_set_features_mips64_test.cc index 380c4e5433..563200ff76 100644 --- a/runtime/arch/mips64/instruction_set_features_mips64_test.cc +++ b/runtime/arch/mips64/instruction_set_features_mips64_test.cc @@ -27,8 +27,8 @@ TEST(Mips64InstructionSetFeaturesTest, Mips64Features) { ASSERT_TRUE(mips64_features.get() != nullptr) << error_msg; EXPECT_EQ(mips64_features->GetInstructionSet(), kMips64); EXPECT_TRUE(mips64_features->Equals(mips64_features.get())); - EXPECT_STREQ("default", mips64_features->GetFeatureString().c_str()); - EXPECT_EQ(mips64_features->AsBitmap(), 0U); + EXPECT_STREQ("msa", mips64_features->GetFeatureString().c_str()); + EXPECT_EQ(mips64_features->AsBitmap(), 1U); } } // namespace art diff --git a/runtime/arch/mips64/registers_mips64.cc b/runtime/arch/mips64/registers_mips64.cc index 495920809f..1ee2cdd204 100644 --- a/runtime/arch/mips64/registers_mips64.cc +++ b/runtime/arch/mips64/registers_mips64.cc @@ -46,5 +46,14 @@ std::ostream& operator<<(std::ostream& os, const FpuRegister& rhs) { return os; } +std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs) { + if (rhs >= W0 && rhs < kNumberOfVectorRegisters) { + os << "w" << static_cast<int>(rhs); + } else { + os << "VectorRegister[" << static_cast<int>(rhs) << "]"; + } + return os; +} + } // namespace mips64 } // namespace art diff --git a/runtime/arch/mips64/registers_mips64.h b/runtime/arch/mips64/registers_mips64.h index 81fae72b44..30de2cc009 100644 --- a/runtime/arch/mips64/registers_mips64.h +++ b/runtime/arch/mips64/registers_mips64.h @@ -107,6 +107,45 @@ enum FpuRegister { }; std::ostream& operator<<(std::ostream& os, const FpuRegister& rhs); +// Values for vector registers. 
+enum VectorRegister { + W0 = 0, + W1 = 1, + W2 = 2, + W3 = 3, + W4 = 4, + W5 = 5, + W6 = 6, + W7 = 7, + W8 = 8, + W9 = 9, + W10 = 10, + W11 = 11, + W12 = 12, + W13 = 13, + W14 = 14, + W15 = 15, + W16 = 16, + W17 = 17, + W18 = 18, + W19 = 19, + W20 = 20, + W21 = 21, + W22 = 22, + W23 = 23, + W24 = 24, + W25 = 25, + W26 = 26, + W27 = 27, + W28 = 28, + W29 = 29, + W30 = 30, + W31 = 31, + kNumberOfVectorRegisters = 32, + kNoVectorRegister = -1, +}; +std::ostream& operator<<(std::ostream& os, const VectorRegister& rhs); + } // namespace mips64 } // namespace art diff --git a/runtime/art_method-inl.h b/runtime/art_method-inl.h index 685e26c78d..b47f8f0fc2 100644 --- a/runtime/art_method-inl.h +++ b/runtime/art_method-inl.h @@ -347,7 +347,11 @@ inline const char* ArtMethod::GetDeclaringClassSourceFile() { inline uint16_t ArtMethod::GetClassDefIndex() { DCHECK(!IsProxyMethod()); - return GetDeclaringClass()->GetDexClassDefIndex(); + if (LIKELY(!IsObsolete())) { + return GetDeclaringClass()->GetDexClassDefIndex(); + } else { + return FindObsoleteDexClassDefIndex(); + } } inline const DexFile::ClassDef& ArtMethod::GetClassDef() { diff --git a/runtime/art_method.cc b/runtime/art_method.cc index 9d74e7c92b..80a877350b 100644 --- a/runtime/art_method.cc +++ b/runtime/art_method.cc @@ -104,6 +104,16 @@ mirror::DexCache* ArtMethod::GetObsoleteDexCache() { UNREACHABLE(); } +uint16_t ArtMethod::FindObsoleteDexClassDefIndex() { + DCHECK(!Runtime::Current()->IsAotCompiler()) << PrettyMethod(); + DCHECK(IsObsolete()); + const DexFile* dex_file = GetDexFile(); + const dex::TypeIndex declaring_class_type = dex_file->GetMethodId(GetDexMethodIndex()).class_idx_; + const DexFile::ClassDef* class_def = dex_file->FindClassDef(declaring_class_type); + CHECK(class_def != nullptr); + return dex_file->GetIndexForClassDef(*class_def); +} + mirror::String* ArtMethod::GetNameAsString(Thread* self) { CHECK(!IsProxyMethod()); StackHandleScope<1> hs(self); diff --git a/runtime/art_method.h 
b/runtime/art_method.h index cd1950c0e2..2248c3bd9d 100644 --- a/runtime/art_method.h +++ b/runtime/art_method.h @@ -700,6 +700,8 @@ class ArtMethod FINAL { } ptr_sized_fields_; private: + uint16_t FindObsoleteDexClassDefIndex() REQUIRES_SHARED(Locks::mutator_lock_); + bool IsAnnotatedWith(jclass klass, uint32_t visibility); static constexpr size_t PtrSizedFieldsOffset(PointerSize pointer_size) { diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h index 4041f5e1ed..f536c72bae 100644 --- a/runtime/base/bit_utils.h +++ b/runtime/base/bit_utils.h @@ -27,6 +27,22 @@ namespace art { +// Like sizeof, but count how many bits a type takes. Pass type explicitly. +template <typename T> +constexpr size_t BitSizeOf() { + static_assert(std::is_integral<T>::value, "T must be integral"); + using unsigned_type = typename std::make_unsigned<T>::type; + static_assert(sizeof(T) == sizeof(unsigned_type), "Unexpected type size mismatch!"); + static_assert(std::numeric_limits<unsigned_type>::radix == 2, "Unexpected radix!"); + return std::numeric_limits<unsigned_type>::digits; +} + +// Like sizeof, but count how many bits a type takes. Infers type from parameter. +template <typename T> +constexpr size_t BitSizeOf(T /*x*/) { + return BitSizeOf<T>(); +} + template<typename T> constexpr int CLZ(T x) { static_assert(std::is_integral<T>::value, "T must be integral"); @@ -37,6 +53,14 @@ constexpr int CLZ(T x) { return (sizeof(T) == sizeof(uint32_t)) ? __builtin_clz(x) : __builtin_clzll(x); } +// Similar to CLZ except that on zero input it returns bitwidth and supports signed integers. +template<typename T> +constexpr int JAVASTYLE_CLZ(T x) { + static_assert(std::is_integral<T>::value, "T must be integral"); + using unsigned_type = typename std::make_unsigned<T>::type; + return (x == 0) ? 
BitSizeOf<T>() : CLZ(static_cast<unsigned_type>(x)); +} + template<typename T> constexpr int CTZ(T x) { static_assert(std::is_integral<T>::value, "T must be integral"); @@ -48,12 +72,32 @@ constexpr int CTZ(T x) { return (sizeof(T) == sizeof(uint32_t)) ? __builtin_ctz(x) : __builtin_ctzll(x); } +// Similar to CTZ except that on zero input it returns bitwidth and supports signed integers. +template<typename T> +constexpr int JAVASTYLE_CTZ(T x) { + static_assert(std::is_integral<T>::value, "T must be integral"); + using unsigned_type = typename std::make_unsigned<T>::type; + return (x == 0) ? BitSizeOf<T>() : CTZ(static_cast<unsigned_type>(x)); +} + // Return the number of 1-bits in `x`. template<typename T> constexpr int POPCOUNT(T x) { return (sizeof(T) == sizeof(uint32_t)) ? __builtin_popcount(x) : __builtin_popcountll(x); } +// Swap bytes. +template<typename T> +constexpr T BSWAP(T x) { + if (sizeof(T) == sizeof(uint16_t)) { + return __builtin_bswap16(x); + } else if (sizeof(T) == sizeof(uint32_t)) { + return __builtin_bswap32(x); + } else { + return __builtin_bswap64(x); + } +} + // Find the bit position of the most significant bit (0-based), or -1 if there were no bits set. template <typename T> constexpr ssize_t MostSignificantBit(T value) { @@ -169,22 +213,6 @@ inline bool IsAlignedParam(T* x, int n) { #define DCHECK_ALIGNED_PARAM(value, alignment) \ DCHECK(::art::IsAlignedParam(value, alignment)) << reinterpret_cast<const void*>(value) -// Like sizeof, but count how many bits a type takes. Pass type explicitly. 
-template <typename T> -constexpr size_t BitSizeOf() { - static_assert(std::is_integral<T>::value, "T must be integral"); - using unsigned_type = typename std::make_unsigned<T>::type; - static_assert(sizeof(T) == sizeof(unsigned_type), "Unexpected type size mismatch!"); - static_assert(std::numeric_limits<unsigned_type>::radix == 2, "Unexpected radix!"); - return std::numeric_limits<unsigned_type>::digits; -} - -// Like sizeof, but count how many bits a type takes. Infers type from parameter. -template <typename T> -constexpr size_t BitSizeOf(T /*x*/) { - return BitSizeOf<T>(); -} - inline uint16_t Low16Bits(uint32_t value) { return static_cast<uint16_t>(value); } @@ -363,6 +391,59 @@ IterationRange<HighToLowBitIterator<T>> HighToLowBits(T bits) { HighToLowBitIterator<T>(bits), HighToLowBitIterator<T>()); } +// Returns value with bit set in lowest one-bit position or 0 if 0. (java.lang.X.lowestOneBit). +template <typename kind> +inline static kind LowestOneBitValue(kind opnd) { + // Hacker's Delight, Section 2-1 + return opnd & -opnd; +} + +// Returns value with bit set in hightest one-bit position or 0 if 0. (java.lang.X.highestOneBit). +template <typename T> +inline static T HighestOneBitValue(T opnd) { + using unsigned_type = typename std::make_unsigned<T>::type; + T res; + if (opnd == 0) { + res = 0; + } else { + int bit_position = BitSizeOf<T>() - (CLZ(static_cast<unsigned_type>(opnd)) + 1); + res = static_cast<T>(UINT64_C(1) << bit_position); + } + return res; +} + +// Rotate bits. +template <typename T, bool left> +inline static T Rot(T opnd, int distance) { + int mask = BitSizeOf<T>() - 1; + int unsigned_right_shift = left ? (-distance & mask) : (distance & mask); + int signed_left_shift = left ? 
(distance & mask) : (-distance & mask); + using unsigned_type = typename std::make_unsigned<T>::type; + return (static_cast<unsigned_type>(opnd) >> unsigned_right_shift) | (opnd << signed_left_shift); +} + +// TUNING: use rbit for arm/arm64 +inline static uint32_t ReverseBits32(uint32_t opnd) { + // Hacker's Delight 7-1 + opnd = ((opnd >> 1) & 0x55555555) | ((opnd & 0x55555555) << 1); + opnd = ((opnd >> 2) & 0x33333333) | ((opnd & 0x33333333) << 2); + opnd = ((opnd >> 4) & 0x0F0F0F0F) | ((opnd & 0x0F0F0F0F) << 4); + opnd = ((opnd >> 8) & 0x00FF00FF) | ((opnd & 0x00FF00FF) << 8); + opnd = ((opnd >> 16)) | ((opnd) << 16); + return opnd; +} + +// TUNING: use rbit for arm/arm64 +inline static uint64_t ReverseBits64(uint64_t opnd) { + // Hacker's Delight 7-1 + opnd = (opnd & 0x5555555555555555L) << 1 | ((opnd >> 1) & 0x5555555555555555L); + opnd = (opnd & 0x3333333333333333L) << 2 | ((opnd >> 2) & 0x3333333333333333L); + opnd = (opnd & 0x0f0f0f0f0f0f0f0fL) << 4 | ((opnd >> 4) & 0x0f0f0f0f0f0f0f0fL); + opnd = (opnd & 0x00ff00ff00ff00ffL) << 8 | ((opnd >> 8) & 0x00ff00ff00ff00ffL); + opnd = (opnd << 48) | ((opnd & 0xffff0000L) << 16) | ((opnd >> 16) & 0xffff0000L) | (opnd >> 48); + return opnd; +} + } // namespace art #endif // ART_RUNTIME_BASE_BIT_UTILS_H_ diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index ab2b39588a..fa87c8ce25 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -906,7 +906,6 @@ bool ClassLinker::InitFromBootImage(std::string* error_msg) { runtime->GetOatFileManager().RegisterImageOatFiles(spaces); DCHECK(!oat_files.empty()); const OatHeader& default_oat_header = oat_files[0]->GetOatHeader(); - CHECK_EQ(default_oat_header.GetImageFileLocationOatChecksum(), 0U); CHECK_EQ(default_oat_header.GetImageFileLocationOatDataBegin(), 0U); const char* image_file_location = oat_files[0]->GetOatHeader(). 
GetStoreValueByKey(OatHeader::kImageLocationKey); @@ -1163,9 +1162,7 @@ class VerifyDirectInterfacesInTableClassVisitor { for (ObjPtr<mirror::Class> klass : classes_) { for (uint32_t i = 0, num = klass->NumDirectInterfaces(); i != num; ++i) { CHECK(klass->GetDirectInterface(self_, klass, i) != nullptr) - << klass->PrettyDescriptor() << " iface #" << i - << klass->GetDexFile().StringByTypeIdx(klass->GetDirectInterfaceTypeIdx(i)) - << " Bug: 34839984"; + << klass->PrettyDescriptor() << " iface #" << i; } } } @@ -1921,12 +1918,22 @@ bool ClassLinker::AddImageSpace( // Since it ensures classes are in the class table. VerifyClassInTableArtMethodVisitor visitor2(class_table); header.VisitPackedArtMethods(&visitor2, space->Begin(), kRuntimePointerSize); - } - if (app_image) { - // TODO: Restrict this check to debug builds. Bug: 34839984 + // Verify that all direct interfaces of classes in the class table are also resolved. VerifyDirectInterfacesInTableClassVisitor visitor(class_loader.Get()); class_table->Visit(visitor); visitor.Check(); + // Check that all non-primitive classes in dex caches are also in the class table. 
+ for (int32_t i = 0; i < dex_caches->GetLength(); i++) { + ObjPtr<mirror::DexCache> dex_cache = dex_caches->Get(i); + mirror::TypeDexCacheType* const types = dex_cache->GetResolvedTypes(); + for (int32_t j = 0, num_types = dex_cache->NumResolvedTypes(); j < num_types; j++) { + ObjPtr<mirror::Class> klass = types[j].load(std::memory_order_relaxed).object.Read(); + if (klass != nullptr && !klass->IsPrimitive()) { + CHECK(class_table->Contains(klass)) << klass->PrettyDescriptor() + << " " << dex_cache->GetDexFile()->GetLocation(); + } + } + } } VLOG(class_linker) << "Adding image space took " << PrettyDuration(NanoTime() - start_time); return true; @@ -4402,9 +4409,9 @@ mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& decoded_name->ToModifiedUtf8().c_str())); CHECK_EQ(ArtField::PrettyField(klass->GetStaticField(1)), throws_field_name); - CHECK_EQ(klass.Get()->GetInterfaces(), + CHECK_EQ(klass.Get()->GetProxyInterfaces(), soa.Decode<mirror::ObjectArray<mirror::Class>>(interfaces)); - CHECK_EQ(klass.Get()->GetThrows(), + CHECK_EQ(klass.Get()->GetProxyThrows(), soa.Decode<mirror::ObjectArray<mirror::ObjectArray<mirror::Class>>>(throws)); } return klass.Get(); @@ -4546,108 +4553,6 @@ bool ClassLinker::CanWeInitializeClass(ObjPtr<mirror::Class> klass, bool can_ini return CanWeInitializeClass(super_class, can_init_statics, can_init_parents); } -std::string DescribeSpace(ObjPtr<mirror::Class> klass) REQUIRES_SHARED(Locks::mutator_lock_) { - std::ostringstream oss; - gc::Heap* heap = Runtime::Current()->GetHeap(); - gc::space::ContinuousSpace* cs = heap->FindContinuousSpaceFromAddress(klass.Ptr()); - if (cs != nullptr) { - if (cs->IsImageSpace()) { - oss << "image/" << cs->GetName() << "/" << cs->AsImageSpace()->GetImageFilename(); - } else { - oss << "continuous/" << cs->GetName(); - } - } else { - gc::space::DiscontinuousSpace* ds = - heap->FindDiscontinuousSpaceFromObject(klass, /* fail_ok */ true); - if (ds != nullptr) { - oss << 
"discontinuous/" << ds->GetName(); - } else { - oss << "invalid"; - } - } - return oss.str(); -} - -std::string DescribeLoaders(ObjPtr<mirror::Class> klass, const char* iface_descriptor) - REQUIRES_SHARED(Locks::mutator_lock_) { - std::ostringstream oss; - uint32_t hash = ComputeModifiedUtf8Hash(iface_descriptor); - ScopedObjectAccessUnchecked soa(Thread::Current()); - ObjPtr<mirror::Class> path_class_loader = - soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_PathClassLoader); - ObjPtr<mirror::Class> dex_class_loader = - soa.Decode<mirror::Class>(WellKnownClasses::dalvik_system_DexClassLoader); - - // Print the class loader chain. - bool found_iface; - const char* loader_separator = ""; - for (ObjPtr<mirror::ClassLoader> loader = klass->GetClassLoader(); - loader != nullptr; - loader = loader->GetParent()) { - oss << loader_separator << loader->GetClass()->PrettyDescriptor(); - loader_separator = ";"; - // If we didn't find the interface yet, try to find it in the current class loader. - if (!found_iface) { - ClassTable* table = Runtime::Current()->GetClassLinker()->ClassTableForClassLoader(loader); - ObjPtr<mirror::Class> iface = - (table != nullptr) ? table->Lookup(iface_descriptor, hash) : nullptr; - if (iface != nullptr) { - found_iface = true; - oss << "[hit:" << DescribeSpace(iface) << "]"; - } - } - - // For PathClassLoader or DexClassLoader also dump the dex file locations. 
- if (loader->GetClass() == path_class_loader || loader->GetClass() == dex_class_loader) { - ArtField* const cookie_field = - jni::DecodeArtField(WellKnownClasses::dalvik_system_DexFile_cookie); - ArtField* const dex_file_field = - jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList__Element_dexFile); - ObjPtr<mirror::Object> dex_path_list = - jni::DecodeArtField(WellKnownClasses::dalvik_system_BaseDexClassLoader_pathList)-> - GetObject(loader); - if (dex_path_list != nullptr && dex_file_field != nullptr && cookie_field != nullptr) { - ObjPtr<mirror::Object> dex_elements_obj = - jni::DecodeArtField(WellKnownClasses::dalvik_system_DexPathList_dexElements)-> - GetObject(dex_path_list); - if (dex_elements_obj != nullptr) { - ObjPtr<mirror::ObjectArray<mirror::Object>> dex_elements = - dex_elements_obj->AsObjectArray<mirror::Object>(); - oss << "("; - const char* path_separator = ""; - for (int32_t i = 0; i != dex_elements->GetLength(); ++i) { - ObjPtr<mirror::Object> element = dex_elements->GetWithoutChecks(i); - ObjPtr<mirror::Object> dex_file = - (element != nullptr) ? dex_file_field->GetObject(element) : nullptr; - ObjPtr<mirror::LongArray> long_array = - (dex_file != nullptr) ? cookie_field->GetObject(dex_file)->AsLongArray() : nullptr; - if (long_array != nullptr) { - int32_t long_array_size = long_array->GetLength(); - // First element is the oat file. - for (int32_t j = kDexFileIndexStart; j < long_array_size; ++j) { - const DexFile* cp_dex_file = reinterpret_cast<const DexFile*>( - static_cast<uintptr_t>(long_array->GetWithoutChecks(j))); - oss << path_separator << cp_dex_file->GetLocation(); - path_separator = ":"; - } - } - } - oss << ")"; - } - } - } - } - - // Do a paranoid check that the `klass` itself is in the class table. - ClassTable* table = - Runtime::Current()->GetClassLinker()->ClassTableForClassLoader(klass->GetClassLoader()); - ObjPtr<mirror::Class> k = (table != nullptr) ? 
table->LookupByDescriptor(klass) : nullptr; - if (k != klass) { - oss << "{FAIL:" << k.Ptr() << "!=" << klass.Ptr() << "}"; - } - return oss.str(); -} - bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass, bool can_init_statics, bool can_init_parents) { // see JLS 3rd edition, 12.4.2 "Detailed Initialization Procedure" for the locking protocol @@ -4795,15 +4700,7 @@ bool ClassLinker::InitializeClass(Thread* self, Handle<mirror::Class> klass, MutableHandle<mirror::Class> handle_scope_iface(hs_iface.NewHandle<mirror::Class>(nullptr)); for (size_t i = 0; i < num_direct_interfaces; i++) { handle_scope_iface.Assign(mirror::Class::GetDirectInterface(self, klass.Get(), i)); - if (UNLIKELY(handle_scope_iface == nullptr)) { - const char* iface_descriptor = - klass->GetDexFile().StringByTypeIdx(klass->GetDirectInterfaceTypeIdx(i)); - LOG(FATAL) << "Check failed: handle_scope_iface != nullptr " - << "Debug data for bug 34839984: " - << klass->PrettyDescriptor() << " iface #" << i << " " << iface_descriptor - << " space: " << DescribeSpace(klass.Get()) - << " loaders: " << DescribeLoaders(klass.Get(), iface_descriptor); - } + CHECK(handle_scope_iface != nullptr) << klass->PrettyDescriptor() << " iface #" << i; CHECK(handle_scope_iface->IsInterface()); if (handle_scope_iface->HasBeenRecursivelyInitialized()) { // We have already done this for this interface. Skip it. @@ -4939,15 +4836,7 @@ bool ClassLinker::InitializeDefaultInterfaceRecursive(Thread* self, // First we initialize all of iface's super-interfaces recursively. 
for (size_t i = 0; i < num_direct_ifaces; i++) { ObjPtr<mirror::Class> super_iface = mirror::Class::GetDirectInterface(self, iface.Get(), i); - if (UNLIKELY(super_iface == nullptr)) { - const char* iface_descriptor = - iface->GetDexFile().StringByTypeIdx(iface->GetDirectInterfaceTypeIdx(i)); - LOG(FATAL) << "Check failed: super_iface != nullptr " - << "Debug data for bug 34839984: " - << iface->PrettyDescriptor() << " iface #" << i << " " << iface_descriptor - << " space: " << DescribeSpace(iface.Get()) - << " loaders: " << DescribeLoaders(iface.Get(), iface_descriptor); - } + CHECK(super_iface != nullptr) << iface->PrettyDescriptor() << " iface #" << i; if (!super_iface->HasBeenRecursivelyInitialized()) { // Recursive step handle_super_iface.Assign(super_iface); @@ -6843,10 +6732,11 @@ static void CheckClassOwnsVTableEntries(Thread* self, auto is_same_method = [m] (const ArtMethod& meth) { return &meth == m; }; - CHECK((super_vtable_length > i && superclass->GetVTableEntry(i, pointer_size) == m) || - std::find_if(virtuals.begin(), virtuals.end(), is_same_method) != virtuals.end()) - << m->PrettyMethod() << " does not seem to be owned by current class " - << klass->PrettyClass() << " or any of its superclasses!"; + if (!((super_vtable_length > i && superclass->GetVTableEntry(i, pointer_size) == m) || + std::find_if(virtuals.begin(), virtuals.end(), is_same_method) != virtuals.end())) { + LOG(WARNING) << m->PrettyMethod() << " does not seem to be owned by current class " + << klass->PrettyClass() << " or any of its superclasses!"; + } } } @@ -6874,14 +6764,15 @@ static void CheckVTableHasNoDuplicates(Thread* self, other_entry->GetAccessFlags())) { continue; } - CHECK(vtable_entry != other_entry && - !name_comparator.HasSameNameAndSignature( - other_entry->GetInterfaceMethodIfProxy(pointer_size))) - << "vtable entries " << i << " and " << j << " are identical for " - << klass->PrettyClass() << " in method " << vtable_entry->PrettyMethod() << " (0x" - << std::hex << 
reinterpret_cast<uintptr_t>(vtable_entry) << ") and " - << other_entry->PrettyMethod() << " (0x" << std::hex - << reinterpret_cast<uintptr_t>(other_entry) << ")"; + if (vtable_entry == other_entry || + name_comparator.HasSameNameAndSignature( + other_entry->GetInterfaceMethodIfProxy(pointer_size))) { + LOG(WARNING) << "vtable entries " << i << " and " << j << " are identical for " + << klass->PrettyClass() << " in method " << vtable_entry->PrettyMethod() + << " (0x" << std::hex << reinterpret_cast<uintptr_t>(vtable_entry) << ") and " + << other_entry->PrettyMethod() << " (0x" << std::hex + << reinterpret_cast<uintptr_t>(other_entry) << ")"; + } } } } diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index e5722a13a7..9f04e598eb 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -668,7 +668,6 @@ struct ProxyOffsets : public CheckOffsets<mirror::Proxy> { struct DexCacheOffsets : public CheckOffsets<mirror::DexCache> { DexCacheOffsets() : CheckOffsets<mirror::DexCache>(false, "Ljava/lang/DexCache;") { - addOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_), "dex"); addOffset(OFFSETOF_MEMBER(mirror::DexCache, dex_file_), "dexFile"); addOffset(OFFSETOF_MEMBER(mirror::DexCache, location_), "location"); addOffset(OFFSETOF_MEMBER(mirror::DexCache, num_resolved_call_sites_), "numResolvedCallSites"); diff --git a/runtime/class_table.cc b/runtime/class_table.cc index af4f998fdf..374b711aa8 100644 --- a/runtime/class_table.cc +++ b/runtime/class_table.cc @@ -55,6 +55,12 @@ mirror::Class* ClassTable::LookupByDescriptor(ObjPtr<mirror::Class> klass) { return nullptr; } +// To take into account http://b/35845221 +#pragma clang diagnostic push +#if __clang_major__ < 4 +#pragma clang diagnostic ignored "-Wunreachable-code" +#endif + mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* klass, size_t hash) { WriterMutexLock mu(Thread::Current(), lock_); // Should only be updating latest table. 
@@ -80,6 +86,8 @@ mirror::Class* ClassTable::UpdateClass(const char* descriptor, mirror::Class* kl return existing; } +#pragma clang diagnostic pop + size_t ClassTable::CountDefiningLoaderClasses(ObjPtr<mirror::ClassLoader> defining_loader, const ClassSet& set) const { size_t count = 0; @@ -105,6 +113,20 @@ size_t ClassTable::NumNonZygoteClasses(ObjPtr<mirror::ClassLoader> defining_load return CountDefiningLoaderClasses(defining_loader, classes_.back()); } +size_t ClassTable::NumReferencedZygoteClasses() const { + ReaderMutexLock mu(Thread::Current(), lock_); + size_t sum = 0; + for (size_t i = 0; i < classes_.size() - 1; ++i) { + sum += classes_[i].Size(); + } + return sum; +} + +size_t ClassTable::NumReferencedNonZygoteClasses() const { + ReaderMutexLock mu(Thread::Current(), lock_); + return classes_.back().Size(); +} + mirror::Class* ClassTable::Lookup(const char* descriptor, size_t hash) { DescriptorHashPair pair(descriptor, hash); ReaderMutexLock mu(Thread::Current(), lock_); diff --git a/runtime/class_table.h b/runtime/class_table.h index 711eae45b8..79f5aea399 100644 --- a/runtime/class_table.h +++ b/runtime/class_table.h @@ -144,16 +144,26 @@ class ClassTable { REQUIRES(!lock_) REQUIRES_SHARED(Locks::mutator_lock_); - // Returns the number of classes in previous snapshots. + // Returns the number of classes in previous snapshots defined by `defining_loader`. size_t NumZygoteClasses(ObjPtr<mirror::ClassLoader> defining_loader) const REQUIRES(!lock_) REQUIRES_SHARED(Locks::mutator_lock_); - // Returns all off the classes in the lastest snapshot. + // Returns all off the classes in the lastest snapshot defined by `defining_loader`. size_t NumNonZygoteClasses(ObjPtr<mirror::ClassLoader> defining_loader) const REQUIRES(!lock_) REQUIRES_SHARED(Locks::mutator_lock_); + // Returns the number of classes in previous snapshots no matter the defining loader. 
+ size_t NumReferencedZygoteClasses() const + REQUIRES(!lock_) + REQUIRES_SHARED(Locks::mutator_lock_); + + // Returns all off the classes in the lastest snapshot no matter the defining loader. + size_t NumReferencedNonZygoteClasses() const + REQUIRES(!lock_) + REQUIRES_SHARED(Locks::mutator_lock_); + // Update a class in the table with the new class. Returns the existing class which was replaced. mirror::Class* UpdateClass(const char* descriptor, mirror::Class* new_klass, size_t hash) REQUIRES(!lock_) diff --git a/runtime/dex_file_annotations.cc b/runtime/dex_file_annotations.cc index d39ea35a90..6b9654dc49 100644 --- a/runtime/dex_file_annotations.cc +++ b/runtime/dex_file_annotations.cc @@ -41,7 +41,80 @@ struct DexFile::AnnotationValue { }; namespace { -mirror::Object* CreateAnnotationMember(Handle<mirror::Class> klass, + +// A helper class that contains all the data needed to do annotation lookup. +class ClassData { + public: + explicit ClassData(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) + : ClassData(ScopedNullHandle<mirror::Class>(), // klass + method, + *method->GetDexFile(), + &method->GetClassDef()) {} + + // Requires Scope to be able to create at least 1 handles. 
+ template <typename Scope> + ClassData(Scope& hs, ArtField* field) REQUIRES_SHARED(Locks::mutator_lock_) + : ClassData(hs.NewHandle(field->GetDeclaringClass())) { } + + explicit ClassData(Handle<mirror::Class> klass) REQUIRES_SHARED(art::Locks::mutator_lock_) + : ClassData(klass, // klass + nullptr, // method + klass->GetDexFile(), + klass->GetClassDef()) {} + + const DexFile& GetDexFile() const REQUIRES_SHARED(Locks::mutator_lock_) { + return dex_file_; + } + + const DexFile::ClassDef* GetClassDef() const REQUIRES_SHARED(Locks::mutator_lock_) { + return class_def_; + } + + ObjPtr<mirror::DexCache> GetDexCache() const REQUIRES_SHARED(Locks::mutator_lock_) { + if (method_ != nullptr) { + return method_->GetDexCache(); + } else { + return real_klass_->GetDexCache(); + } + } + + ObjPtr<mirror::ClassLoader> GetClassLoader() const REQUIRES_SHARED(Locks::mutator_lock_) { + if (method_ != nullptr) { + return method_->GetDeclaringClass()->GetClassLoader(); + } else { + return real_klass_->GetClassLoader(); + } + } + + ObjPtr<mirror::Class> GetRealClass() const REQUIRES_SHARED(Locks::mutator_lock_) { + if (method_ != nullptr) { + return method_->GetDeclaringClass(); + } else { + return real_klass_.Get(); + } + } + + private: + ClassData(Handle<mirror::Class> klass, + ArtMethod* method, + const DexFile& dex_file, + const DexFile::ClassDef* class_def) REQUIRES_SHARED(Locks::mutator_lock_) + : real_klass_(klass), + method_(method), + dex_file_(dex_file), + class_def_(class_def) { + DCHECK((method_ == nullptr) || real_klass_.IsNull()); + } + + Handle<mirror::Class> real_klass_; + ArtMethod* method_; + const DexFile& dex_file_; + const DexFile::ClassDef* class_def_; + + DISALLOW_COPY_AND_ASSIGN(ClassData); +}; + +mirror::Object* CreateAnnotationMember(const ClassData& klass, Handle<mirror::Class> annotation_class, const uint8_t** annotation) REQUIRES_SHARED(Locks::mutator_lock_); @@ -185,9 +258,8 @@ const uint8_t* SearchEncodedAnnotation(const DexFile& dex_file, const 
DexFile::AnnotationSetItem* FindAnnotationSetForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) { const DexFile* dex_file = method->GetDexFile(); - mirror::Class* klass = method->GetDeclaringClass(); const DexFile::AnnotationsDirectoryItem* annotations_dir = - dex_file->GetAnnotationsDirectory(*klass->GetClassDef()); + dex_file->GetAnnotationsDirectory(method->GetClassDef()); if (annotations_dir == nullptr) { return nullptr; } @@ -209,9 +281,8 @@ const DexFile::AnnotationSetItem* FindAnnotationSetForMethod(ArtMethod* method) const DexFile::ParameterAnnotationsItem* FindAnnotationsItemForMethod(ArtMethod* method) REQUIRES_SHARED(Locks::mutator_lock_) { const DexFile* dex_file = method->GetDexFile(); - mirror::Class* klass = method->GetDeclaringClass(); const DexFile::AnnotationsDirectoryItem* annotations_dir = - dex_file->GetAnnotationsDirectory(*klass->GetClassDef()); + dex_file->GetAnnotationsDirectory(method->GetClassDef()); if (annotations_dir == nullptr) { return nullptr; } @@ -230,30 +301,34 @@ const DexFile::ParameterAnnotationsItem* FindAnnotationsItemForMethod(ArtMethod* return nullptr; } -const DexFile::AnnotationSetItem* FindAnnotationSetForClass(Handle<mirror::Class> klass) +const DexFile::AnnotationSetItem* FindAnnotationSetForClass(const ClassData& klass) REQUIRES_SHARED(Locks::mutator_lock_) { - const DexFile& dex_file = klass->GetDexFile(); + const DexFile& dex_file = klass.GetDexFile(); const DexFile::AnnotationsDirectoryItem* annotations_dir = - dex_file.GetAnnotationsDirectory(*klass->GetClassDef()); + dex_file.GetAnnotationsDirectory(*klass.GetClassDef()); if (annotations_dir == nullptr) { return nullptr; } return dex_file.GetClassAnnotationSet(annotations_dir); } -mirror::Object* ProcessEncodedAnnotation(Handle<mirror::Class> klass, const uint8_t** annotation) +mirror::Object* ProcessEncodedAnnotation(const ClassData& klass, const uint8_t** annotation) REQUIRES_SHARED(Locks::mutator_lock_) { uint32_t type_index = 
DecodeUnsignedLeb128(annotation); uint32_t size = DecodeUnsignedLeb128(annotation); Thread* self = Thread::Current(); ScopedObjectAccessUnchecked soa(self); - StackHandleScope<2> hs(self); + StackHandleScope<4> hs(self); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); Handle<mirror::Class> annotation_class(hs.NewHandle( - class_linker->ResolveType(klass->GetDexFile(), dex::TypeIndex(type_index), klass.Get()))); + class_linker->ResolveType(klass.GetDexFile(), + dex::TypeIndex(type_index), + hs.NewHandle(klass.GetDexCache()), + hs.NewHandle(klass.GetClassLoader())))); if (annotation_class == nullptr) { - LOG(INFO) << "Unable to resolve " << klass->PrettyClass() << " annotation class " << type_index; + LOG(INFO) << "Unable to resolve " << klass.GetRealClass()->PrettyClass() + << " annotation class " << type_index; DCHECK(Thread::Current()->IsExceptionPending()); Thread::Current()->ClearException(); return nullptr; @@ -300,13 +375,13 @@ mirror::Object* ProcessEncodedAnnotation(Handle<mirror::Class> klass, const uint } template <bool kTransactionActive> -bool ProcessAnnotationValue(Handle<mirror::Class> klass, +bool ProcessAnnotationValue(const ClassData& klass, const uint8_t** annotation_ptr, DexFile::AnnotationValue* annotation_value, Handle<mirror::Class> array_class, DexFile::AnnotationResultStyle result_style) REQUIRES_SHARED(Locks::mutator_lock_) { - const DexFile& dex_file = klass->GetDexFile(); + const DexFile& dex_file = klass.GetDexFile(); Thread* self = Thread::Current(); ObjPtr<mirror::Object> element_object = nullptr; bool set_object = false; @@ -361,9 +436,8 @@ bool ProcessAnnotationValue(Handle<mirror::Class> klass, annotation_value->value_.SetI(index); } else { StackHandleScope<1> hs(self); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); element_object = Runtime::Current()->GetClassLinker()->ResolveString( - klass->GetDexFile(), dex::StringIndex(index), dex_cache); + klass.GetDexFile(), dex::StringIndex(index), 
hs.NewHandle(klass.GetDexCache())); set_object = true; if (element_object == nullptr) { return false; @@ -377,8 +451,12 @@ bool ProcessAnnotationValue(Handle<mirror::Class> klass, annotation_value->value_.SetI(index); } else { dex::TypeIndex type_index(index); + StackHandleScope<2> hs(self); element_object = Runtime::Current()->GetClassLinker()->ResolveType( - klass->GetDexFile(), type_index, klass.Get()); + klass.GetDexFile(), + type_index, + hs.NewHandle(klass.GetDexCache()), + hs.NewHandle(klass.GetClassLoader())); set_object = true; if (element_object == nullptr) { CHECK(self->IsExceptionPending()); @@ -399,12 +477,13 @@ bool ProcessAnnotationValue(Handle<mirror::Class> klass, if (result_style == DexFile::kAllRaw) { annotation_value->value_.SetI(index); } else { - StackHandleScope<2> hs(self); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); - Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader())); ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); + StackHandleScope<2> hs(self); ArtMethod* method = class_linker->ResolveMethodWithoutInvokeType( - klass->GetDexFile(), index, dex_cache, class_loader); + klass.GetDexFile(), + index, + hs.NewHandle(klass.GetDexCache()), + hs.NewHandle(klass.GetClassLoader())); if (method == nullptr) { return false; } @@ -439,10 +518,11 @@ bool ProcessAnnotationValue(Handle<mirror::Class> klass, annotation_value->value_.SetI(index); } else { StackHandleScope<2> hs(self); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); - Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader())); ArtField* field = Runtime::Current()->GetClassLinker()->ResolveFieldJLS( - klass->GetDexFile(), index, dex_cache, class_loader); + klass.GetDexFile(), + index, + hs.NewHandle(klass.GetDexCache()), + hs.NewHandle(klass.GetClassLoader())); if (field == nullptr) { return false; } @@ -467,10 +547,12 @@ bool ProcessAnnotationValue(Handle<mirror::Class> 
klass, annotation_value->value_.SetI(index); } else { StackHandleScope<3> hs(self); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); - Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader())); ArtField* enum_field = Runtime::Current()->GetClassLinker()->ResolveField( - klass->GetDexFile(), index, dex_cache, class_loader, true); + klass.GetDexFile(), + index, + hs.NewHandle(klass.GetDexCache()), + hs.NewHandle(klass.GetClassLoader()), + true); if (enum_field == nullptr) { return false; } else { @@ -595,10 +677,10 @@ bool ProcessAnnotationValue(Handle<mirror::Class> klass, return true; } -mirror::Object* CreateAnnotationMember(Handle<mirror::Class> klass, +mirror::Object* CreateAnnotationMember(const ClassData& klass, Handle<mirror::Class> annotation_class, const uint8_t** annotation) { - const DexFile& dex_file = klass->GetDexFile(); + const DexFile& dex_file = klass.GetDexFile(); Thread* self = Thread::Current(); ScopedObjectAccessUnchecked soa(self); StackHandleScope<5> hs(self); @@ -666,12 +748,12 @@ mirror::Object* CreateAnnotationMember(Handle<mirror::Class> klass, } const DexFile::AnnotationItem* GetAnnotationItemFromAnnotationSet( - Handle<mirror::Class> klass, + const ClassData& klass, const DexFile::AnnotationSetItem* annotation_set, uint32_t visibility, Handle<mirror::Class> annotation_class) REQUIRES_SHARED(Locks::mutator_lock_) { - const DexFile& dex_file = klass->GetDexFile(); + const DexFile& dex_file = klass.GetDexFile(); for (uint32_t i = 0; i < annotation_set->size_; ++i) { const DexFile::AnnotationItem* annotation_item = dex_file.GetAnnotationItem(annotation_set, i); if (!IsVisibilityCompatible(annotation_item->visibility_, visibility)) { @@ -679,12 +761,16 @@ const DexFile::AnnotationItem* GetAnnotationItemFromAnnotationSet( } const uint8_t* annotation = annotation_item->annotation_; uint32_t type_index = DecodeUnsignedLeb128(&annotation); + StackHandleScope<2> hs(Thread::Current()); mirror::Class* 
resolved_class = Runtime::Current()->GetClassLinker()->ResolveType( - klass->GetDexFile(), dex::TypeIndex(type_index), klass.Get()); + klass.GetDexFile(), + dex::TypeIndex(type_index), + hs.NewHandle(klass.GetDexCache()), + hs.NewHandle(klass.GetClassLoader())); if (resolved_class == nullptr) { std::string temp; LOG(WARNING) << StringPrintf("Unable to resolve %s annotation class %d", - klass->GetDescriptor(&temp), type_index); + klass.GetRealClass()->GetDescriptor(&temp), type_index); CHECK(Thread::Current()->IsExceptionPending()); Thread::Current()->ClearException(); continue; @@ -698,7 +784,7 @@ const DexFile::AnnotationItem* GetAnnotationItemFromAnnotationSet( } mirror::Object* GetAnnotationObjectFromAnnotationSet( - Handle<mirror::Class> klass, + const ClassData& klass, const DexFile::AnnotationSetItem* annotation_set, uint32_t visibility, Handle<mirror::Class> annotation_class) @@ -712,13 +798,13 @@ mirror::Object* GetAnnotationObjectFromAnnotationSet( return ProcessEncodedAnnotation(klass, &annotation); } -mirror::Object* GetAnnotationValue(Handle<mirror::Class> klass, +mirror::Object* GetAnnotationValue(const ClassData& klass, const DexFile::AnnotationItem* annotation_item, const char* annotation_name, Handle<mirror::Class> array_class, uint32_t expected_type) REQUIRES_SHARED(Locks::mutator_lock_) { - const DexFile& dex_file = klass->GetDexFile(); + const DexFile& dex_file = klass.GetDexFile(); const uint8_t* annotation = SearchEncodedAnnotation(dex_file, annotation_item->annotation_, annotation_name); if (annotation == nullptr) { @@ -745,10 +831,10 @@ mirror::Object* GetAnnotationValue(Handle<mirror::Class> klass, return annotation_value.value_.GetL(); } -mirror::ObjectArray<mirror::String>* GetSignatureValue(Handle<mirror::Class> klass, +mirror::ObjectArray<mirror::String>* GetSignatureValue(const ClassData& klass, const DexFile::AnnotationSetItem* annotation_set) REQUIRES_SHARED(Locks::mutator_lock_) { - const DexFile& dex_file = klass->GetDexFile(); + 
const DexFile& dex_file = klass.GetDexFile(); StackHandleScope<1> hs(Thread::Current()); const DexFile::AnnotationItem* annotation_item = SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/Signature;", @@ -771,10 +857,10 @@ mirror::ObjectArray<mirror::String>* GetSignatureValue(Handle<mirror::Class> kla return obj->AsObjectArray<mirror::String>(); } -mirror::ObjectArray<mirror::Class>* GetThrowsValue(Handle<mirror::Class> klass, +mirror::ObjectArray<mirror::Class>* GetThrowsValue(const ClassData& klass, const DexFile::AnnotationSetItem* annotation_set) REQUIRES_SHARED(Locks::mutator_lock_) { - const DexFile& dex_file = klass->GetDexFile(); + const DexFile& dex_file = klass.GetDexFile(); StackHandleScope<1> hs(Thread::Current()); const DexFile::AnnotationItem* annotation_item = SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/Throws;", @@ -798,11 +884,11 @@ mirror::ObjectArray<mirror::Class>* GetThrowsValue(Handle<mirror::Class> klass, } mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet( - Handle<mirror::Class> klass, + const ClassData& klass, const DexFile::AnnotationSetItem* annotation_set, uint32_t visibility) REQUIRES_SHARED(Locks::mutator_lock_) { - const DexFile& dex_file = klass->GetDexFile(); + const DexFile& dex_file = klass.GetDexFile(); Thread* self = Thread::Current(); ScopedObjectAccessUnchecked soa(self); StackHandleScope<2> hs(self); @@ -856,11 +942,11 @@ mirror::ObjectArray<mirror::Object>* ProcessAnnotationSet( } mirror::ObjectArray<mirror::Object>* ProcessAnnotationSetRefList( - Handle<mirror::Class> klass, + const ClassData& klass, const DexFile::AnnotationSetRefList* set_ref_list, uint32_t size) REQUIRES_SHARED(Locks::mutator_lock_) { - const DexFile& dex_file = klass->GetDexFile(); + const DexFile& dex_file = klass.GetDexFile(); Thread* self = Thread::Current(); ScopedObjectAccessUnchecked soa(self); StackHandleScope<1> hs(self); @@ -899,15 +985,17 @@ mirror::Object* GetAnnotationForField(ArtField* field, 
Handle<mirror::Class> ann return nullptr; } StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass())); - return GetAnnotationObjectFromAnnotationSet(field_class, annotation_set, - DexFile::kDexVisibilityRuntime, annotation_class); + const ClassData field_class(hs, field); + return GetAnnotationObjectFromAnnotationSet(field_class, + annotation_set, + DexFile::kDexVisibilityRuntime, + annotation_class); } mirror::ObjectArray<mirror::Object>* GetAnnotationsForField(ArtField* field) { const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForField(field); StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass())); + const ClassData field_class(hs, field); return ProcessAnnotationSet(field_class, annotation_set, DexFile::kDexVisibilityRuntime); } @@ -917,7 +1005,7 @@ mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForField(ArtField* fi return nullptr; } StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass())); + const ClassData field_class(hs, field); return GetSignatureValue(field_class, annotation_set); } @@ -927,17 +1015,17 @@ bool IsFieldAnnotationPresent(ArtField* field, Handle<mirror::Class> annotation_ return false; } StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> field_class(hs.NewHandle(field->GetDeclaringClass())); + const ClassData field_class(hs, field); const DexFile::AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet( field_class, annotation_set, DexFile::kDexVisibilityRuntime, annotation_class); return annotation_item != nullptr; } mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) { - const DexFile* dex_file = method->GetDexFile(); - mirror::Class* klass = method->GetDeclaringClass(); + const ClassData klass(method); + const DexFile* dex_file = &klass.GetDexFile(); const 
DexFile::AnnotationsDirectoryItem* annotations_dir = - dex_file->GetAnnotationsDirectory(*klass->GetClassDef()); + dex_file->GetAnnotationsDirectory(*klass.GetClassDef()); if (annotations_dir == nullptr) { return nullptr; } @@ -965,10 +1053,9 @@ mirror::Object* GetAnnotationDefaultValue(ArtMethod* method) { return nullptr; } DexFile::AnnotationValue annotation_value; - StackHandleScope<2> hs(Thread::Current()); - Handle<mirror::Class> h_klass(hs.NewHandle(klass)); + StackHandleScope<1> hs(Thread::Current()); Handle<mirror::Class> return_type(hs.NewHandle(method->GetReturnType(true /* resolve */))); - if (!ProcessAnnotationValue<false>(h_klass, + if (!ProcessAnnotationValue<false>(klass, &annotation, &annotation_value, return_type, @@ -983,17 +1070,15 @@ mirror::Object* GetAnnotationForMethod(ArtMethod* method, Handle<mirror::Class> if (annotation_set == nullptr) { return nullptr; } - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); - return GetAnnotationObjectFromAnnotationSet(method_class, annotation_set, + return GetAnnotationObjectFromAnnotationSet(ClassData(method), annotation_set, DexFile::kDexVisibilityRuntime, annotation_class); } mirror::ObjectArray<mirror::Object>* GetAnnotationsForMethod(ArtMethod* method) { const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForMethod(method); - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); - return ProcessAnnotationSet(method_class, annotation_set, DexFile::kDexVisibilityRuntime); + return ProcessAnnotationSet(ClassData(method), + annotation_set, + DexFile::kDexVisibilityRuntime); } mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method) { @@ -1001,9 +1086,7 @@ mirror::ObjectArray<mirror::Class>* GetExceptionTypesForMethod(ArtMethod* method if (annotation_set == nullptr) { return nullptr; } - StackHandleScope<1> 
hs(Thread::Current()); - Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); - return GetThrowsValue(method_class, annotation_set); + return GetThrowsValue(ClassData(method), annotation_set); } mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) { @@ -1019,9 +1102,7 @@ mirror::ObjectArray<mirror::Object>* GetParameterAnnotations(ArtMethod* method) return nullptr; } uint32_t size = set_ref_list->size_; - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); - return ProcessAnnotationSetRefList(method_class, set_ref_list, size); + return ProcessAnnotationSetRefList(ClassData(method), set_ref_list, size); } mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method, @@ -1045,9 +1126,7 @@ mirror::Object* GetAnnotationForMethodParameter(ArtMethod* method, const DexFile::AnnotationSetItem* annotation_set = dex_file->GetSetRefItemItem(annotation_set_ref); - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); - return GetAnnotationObjectFromAnnotationSet(method_class, + return GetAnnotationObjectFromAnnotationSet(ClassData(method), annotation_set, DexFile::kDexVisibilityRuntime, annotation_class); @@ -1072,7 +1151,7 @@ bool GetParametersMetadataForMethod(ArtMethod* method, return false; } - StackHandleScope<5> hs(Thread::Current()); + StackHandleScope<4> hs(Thread::Current()); // Extract the parameters' names String[]. 
ObjPtr<mirror::Class> string_class = mirror::String::GetJavaLangString(); @@ -1082,9 +1161,9 @@ bool GetParametersMetadataForMethod(ArtMethod* method, return false; } - Handle<mirror::Class> klass = hs.NewHandle(method->GetDeclaringClass()); + ClassData data(method); Handle<mirror::Object> names_obj = - hs.NewHandle(GetAnnotationValue(klass, + hs.NewHandle(GetAnnotationValue(data, annotation_item, "names", string_array_class, @@ -1099,7 +1178,7 @@ bool GetParametersMetadataForMethod(ArtMethod* method, return false; } Handle<mirror::Object> access_flags_obj = - hs.NewHandle(GetAnnotationValue(klass, + hs.NewHandle(GetAnnotationValue(data, annotation_item, "accessFlags", int_array_class, @@ -1118,9 +1197,7 @@ mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForMethod(ArtMethod* if (annotation_set == nullptr) { return nullptr; } - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); - return GetSignatureValue(method_class, annotation_set); + return GetSignatureValue(ClassData(method), annotation_set); } bool IsMethodAnnotationPresent(ArtMethod* method, Handle<mirror::Class> annotation_class, @@ -1129,37 +1206,39 @@ bool IsMethodAnnotationPresent(ArtMethod* method, Handle<mirror::Class> annotati if (annotation_set == nullptr) { return false; } - StackHandleScope<1> hs(Thread::Current()); - Handle<mirror::Class> method_class(hs.NewHandle(method->GetDeclaringClass())); const DexFile::AnnotationItem* annotation_item = - GetAnnotationItemFromAnnotationSet(method_class, annotation_set, visibility, - annotation_class); + GetAnnotationItemFromAnnotationSet(ClassData(method), + annotation_set, visibility, annotation_class); return annotation_item != nullptr; } mirror::Object* GetAnnotationForClass(Handle<mirror::Class> klass, Handle<mirror::Class> annotation_class) { - const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + ClassData data(klass); + const 
DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data); if (annotation_set == nullptr) { return nullptr; } - return GetAnnotationObjectFromAnnotationSet(klass, annotation_set, DexFile::kDexVisibilityRuntime, + return GetAnnotationObjectFromAnnotationSet(data, + annotation_set, + DexFile::kDexVisibilityRuntime, annotation_class); } mirror::ObjectArray<mirror::Object>* GetAnnotationsForClass(Handle<mirror::Class> klass) { - const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); - return ProcessAnnotationSet(klass, annotation_set, DexFile::kDexVisibilityRuntime); + ClassData data(klass); + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data); + return ProcessAnnotationSet(data, annotation_set, DexFile::kDexVisibilityRuntime); } mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> klass) { - const DexFile& dex_file = klass->GetDexFile(); - const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + ClassData data(klass); + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data); if (annotation_set == nullptr) { return nullptr; } const DexFile::AnnotationItem* annotation_item = - SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/MemberClasses;", + SearchAnnotationSet(data.GetDexFile(), annotation_set, "Ldalvik/annotation/MemberClasses;", DexFile::kDexVisibilitySystem); if (annotation_item == nullptr) { return nullptr; @@ -1172,7 +1251,7 @@ mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> kla return nullptr; } mirror::Object* obj = - GetAnnotationValue(klass, annotation_item, "value", class_array_class, + GetAnnotationValue(data, annotation_item, "value", class_array_class, DexFile::kDexAnnotationArray); if (obj == nullptr) { return nullptr; @@ -1181,18 +1260,18 @@ mirror::ObjectArray<mirror::Class>* GetDeclaredClasses(Handle<mirror::Class> kla } mirror::Class* 
GetDeclaringClass(Handle<mirror::Class> klass) { - const DexFile& dex_file = klass->GetDexFile(); - const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + ClassData data(klass); + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data); if (annotation_set == nullptr) { return nullptr; } const DexFile::AnnotationItem* annotation_item = - SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/EnclosingClass;", + SearchAnnotationSet(data.GetDexFile(), annotation_set, "Ldalvik/annotation/EnclosingClass;", DexFile::kDexVisibilitySystem); if (annotation_item == nullptr) { return nullptr; } - mirror::Object* obj = GetAnnotationValue(klass, annotation_item, "value", + mirror::Object* obj = GetAnnotationValue(data, annotation_item, "value", ScopedNullHandle<mirror::Class>(), DexFile::kDexAnnotationType); if (obj == nullptr) { @@ -1202,28 +1281,30 @@ mirror::Class* GetDeclaringClass(Handle<mirror::Class> klass) { } mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) { - const DexFile& dex_file = klass->GetDexFile(); mirror::Class* declaring_class = GetDeclaringClass(klass); if (declaring_class != nullptr) { return declaring_class; } - const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + ClassData data(klass); + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data); if (annotation_set == nullptr) { return nullptr; } const DexFile::AnnotationItem* annotation_item = - SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/EnclosingMethod;", + SearchAnnotationSet(data.GetDexFile(), + annotation_set, + "Ldalvik/annotation/EnclosingMethod;", DexFile::kDexVisibilitySystem); if (annotation_item == nullptr) { return nullptr; } const uint8_t* annotation = - SearchEncodedAnnotation(dex_file, annotation_item->annotation_, "value"); + SearchEncodedAnnotation(data.GetDexFile(), annotation_item->annotation_, "value"); if (annotation == 
nullptr) { return nullptr; } DexFile::AnnotationValue annotation_value; - if (!ProcessAnnotationValue<false>(klass, + if (!ProcessAnnotationValue<false>(data, &annotation, &annotation_value, ScopedNullHandle<mirror::Class>(), @@ -1234,10 +1315,11 @@ mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) { return nullptr; } StackHandleScope<2> hs(Thread::Current()); - Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache())); - Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader())); ArtMethod* method = Runtime::Current()->GetClassLinker()->ResolveMethodWithoutInvokeType( - klass->GetDexFile(), annotation_value.value_.GetI(), dex_cache, class_loader); + data.GetDexFile(), + annotation_value.value_.GetI(), + hs.NewHandle(data.GetDexCache()), + hs.NewHandle(data.GetClassLoader())); if (method == nullptr) { return nullptr; } @@ -1245,39 +1327,44 @@ mirror::Class* GetEnclosingClass(Handle<mirror::Class> klass) { } mirror::Object* GetEnclosingMethod(Handle<mirror::Class> klass) { - const DexFile& dex_file = klass->GetDexFile(); - const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + ClassData data(klass); + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data); if (annotation_set == nullptr) { return nullptr; } const DexFile::AnnotationItem* annotation_item = - SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/EnclosingMethod;", + SearchAnnotationSet(data.GetDexFile(), + annotation_set, + "Ldalvik/annotation/EnclosingMethod;", DexFile::kDexVisibilitySystem); if (annotation_item == nullptr) { return nullptr; } - return GetAnnotationValue(klass, annotation_item, "value", ScopedNullHandle<mirror::Class>(), + return GetAnnotationValue(data, annotation_item, "value", ScopedNullHandle<mirror::Class>(), DexFile::kDexAnnotationMethod); } bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) { - const DexFile& dex_file = klass->GetDexFile(); - 
const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + ClassData data(klass); + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data); if (annotation_set == nullptr) { return false; } const DexFile::AnnotationItem* annotation_item = SearchAnnotationSet( - dex_file, annotation_set, "Ldalvik/annotation/InnerClass;", DexFile::kDexVisibilitySystem); + data.GetDexFile(), + annotation_set, + "Ldalvik/annotation/InnerClass;", + DexFile::kDexVisibilitySystem); if (annotation_item == nullptr) { return false; } const uint8_t* annotation = - SearchEncodedAnnotation(dex_file, annotation_item->annotation_, "name"); + SearchEncodedAnnotation(data.GetDexFile(), annotation_item->annotation_, "name"); if (annotation == nullptr) { return false; } DexFile::AnnotationValue annotation_value; - if (!ProcessAnnotationValue<false>(klass, + if (!ProcessAnnotationValue<false>(data, &annotation, &annotation_value, ScopedNullHandle<mirror::Class>(), @@ -1293,24 +1380,24 @@ bool GetInnerClass(Handle<mirror::Class> klass, mirror::String** name) { } bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) { - const DexFile& dex_file = klass->GetDexFile(); - const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + ClassData data(klass); + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data); if (annotation_set == nullptr) { return false; } const DexFile::AnnotationItem* annotation_item = - SearchAnnotationSet(dex_file, annotation_set, "Ldalvik/annotation/InnerClass;", + SearchAnnotationSet(data.GetDexFile(), annotation_set, "Ldalvik/annotation/InnerClass;", DexFile::kDexVisibilitySystem); if (annotation_item == nullptr) { return false; } const uint8_t* annotation = - SearchEncodedAnnotation(dex_file, annotation_item->annotation_, "accessFlags"); + SearchEncodedAnnotation(data.GetDexFile(), annotation_item->annotation_, "accessFlags"); if (annotation == nullptr) { 
return false; } DexFile::AnnotationValue annotation_value; - if (!ProcessAnnotationValue<false>(klass, + if (!ProcessAnnotationValue<false>(data, &annotation, &annotation_value, ScopedNullHandle<mirror::Class>(), @@ -1325,20 +1412,22 @@ bool GetInnerClassFlags(Handle<mirror::Class> klass, uint32_t* flags) { } mirror::ObjectArray<mirror::String>* GetSignatureAnnotationForClass(Handle<mirror::Class> klass) { - const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + ClassData data(klass); + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data); if (annotation_set == nullptr) { return nullptr; } - return GetSignatureValue(klass, annotation_set); + return GetSignatureValue(data, annotation_set); } bool IsClassAnnotationPresent(Handle<mirror::Class> klass, Handle<mirror::Class> annotation_class) { - const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(klass); + ClassData data(klass); + const DexFile::AnnotationSetItem* annotation_set = FindAnnotationSetForClass(data); if (annotation_set == nullptr) { return false; } const DexFile::AnnotationItem* annotation_item = GetAnnotationItemFromAnnotationSet( - klass, annotation_set, DexFile::kDexVisibilityRuntime, annotation_class); + data, annotation_set, DexFile::kDexVisibilityRuntime, annotation_class); return annotation_item != nullptr; } diff --git a/runtime/dexopt_test.cc b/runtime/dexopt_test.cc index 51678699b4..db65e40da5 100644 --- a/runtime/dexopt_test.cc +++ b/runtime/dexopt_test.cc @@ -111,7 +111,7 @@ void DexoptTest::GenerateOatForTest(const std::string& dex_location, &error_msg)); ASSERT_TRUE(image_header != nullptr) << error_msg; const OatHeader& oat_header = odex_file->GetOatHeader(); - uint32_t combined_checksum = OatFileAssistant::CalculateCombinedImageChecksum(); + uint32_t combined_checksum = image_header->GetOatChecksum(); if (CompilerFilter::DependsOnImageChecksum(filter)) { if (with_alternate_image) { diff --git 
a/runtime/entrypoints/entrypoint_utils-inl.h b/runtime/entrypoints/entrypoint_utils-inl.h index 3bc49b8506..ba8cec3a52 100644 --- a/runtime/entrypoints/entrypoint_utils-inl.h +++ b/runtime/entrypoints/entrypoint_utils-inl.h @@ -43,6 +43,7 @@ namespace art { inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method, + const MethodInfo& method_info, const InlineInfo& inline_info, const InlineInfoEncoding& encoding, uint8_t inlining_depth) @@ -56,7 +57,7 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method, return inline_info.GetArtMethodAtDepth(encoding, inlining_depth); } - uint32_t method_index = inline_info.GetMethodIndexAtDepth(encoding, inlining_depth); + uint32_t method_index = inline_info.GetMethodIndexAtDepth(encoding, method_info, inlining_depth); if (inline_info.GetDexPcAtDepth(encoding, inlining_depth) == static_cast<uint32_t>(-1)) { // "charAt" special case. It is the only non-leaf method we inline across dex files. ArtMethod* inlined_method = jni::DecodeArtMethod(WellKnownClasses::java_lang_String_charAt); @@ -68,6 +69,7 @@ inline ArtMethod* GetResolvedMethod(ArtMethod* outer_method, ArtMethod* caller = outer_method; if (inlining_depth != 0) { caller = GetResolvedMethod(outer_method, + method_info, inline_info, encoding, inlining_depth - 1); diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc index 6301362e09..b5130d7999 100644 --- a/runtime/entrypoints/entrypoint_utils.cc +++ b/runtime/entrypoints/entrypoint_utils.cc @@ -138,7 +138,7 @@ JValue InvokeProxyInvocationHandler(ScopedObjectAccessAlreadyRunnable& soa, cons reinterpret_cast<uintptr_t>(&virtual_methods.At(0))) / method_size; CHECK_LT(throws_index, static_cast<int>(num_virtuals)); mirror::ObjectArray<mirror::Class>* declared_exceptions = - proxy_class->GetThrows()->Get(throws_index); + proxy_class->GetProxyThrows()->Get(throws_index); mirror::Class* exception_class = exception->GetClass(); for (int32_t i = 0; i < 
declared_exceptions->GetLength() && !declares_exception; i++) { mirror::Class* declared_exception = declared_exceptions->Get(i); @@ -201,12 +201,14 @@ static inline ArtMethod* DoGetCalleeSaveMethodCaller(ArtMethod* outer_method, DCHECK(current_code->IsOptimized()); uintptr_t native_pc_offset = current_code->NativeQuickPcOffset(caller_pc); CodeInfo code_info = current_code->GetOptimizedCodeInfo(); + MethodInfo method_info = current_code->GetOptimizedMethodInfo(); CodeInfoEncoding encoding = code_info.ExtractEncoding(); StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding); DCHECK(stack_map.IsValid()); if (stack_map.HasInlineInfo(encoding.stack_map.encoding)) { InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding); caller = GetResolvedMethod(outer_method, + method_info, inline_info, encoding.inline_info.encoding, inline_info.GetDepth(encoding.inline_info.encoding) - 1); diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index 3fd20a66c2..25073a8b79 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -372,10 +372,11 @@ class QuickArgumentVisitor { uintptr_t outer_pc_offset = current_code->NativeQuickPcOffset(outer_pc); CodeInfo code_info = current_code->GetOptimizedCodeInfo(); CodeInfoEncoding encoding = code_info.ExtractEncoding(); + MethodInfo method_info = current_code->GetOptimizedMethodInfo(); InvokeInfo invoke(code_info.GetInvokeInfoForNativePcOffset(outer_pc_offset, encoding)); if (invoke.IsValid()) { *invoke_type = static_cast<InvokeType>(invoke.GetInvokeType(encoding.invoke_info.encoding)); - *dex_method_index = invoke.GetMethodIndex(encoding.invoke_info.encoding); + *dex_method_index = invoke.GetMethodIndex(encoding.invoke_info.encoding, method_info); return true; } return false; diff --git a/runtime/gc/collector/concurrent_copying.cc 
b/runtime/gc/collector/concurrent_copying.cc index aea9708ddc..7136f101aa 100644 --- a/runtime/gc/collector/concurrent_copying.cc +++ b/runtime/gc/collector/concurrent_copying.cc @@ -2171,7 +2171,9 @@ mirror::Object* ConcurrentCopying::Copy(mirror::Object* from_ref) { fall_back_to_non_moving = true; to_ref = heap_->non_moving_space_->Alloc(Thread::Current(), obj_size, &non_moving_space_bytes_allocated, nullptr, &dummy); - CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed"; + CHECK(to_ref != nullptr) << "Fall-back non-moving space allocation failed for a " + << obj_size << " byte object in region type " + << region_space_->GetRegionType(from_ref); bytes_allocated = non_moving_space_bytes_allocated; // Mark it in the mark bitmap. accounting::ContinuousSpaceBitmap* mark_bitmap = diff --git a/runtime/image.cc b/runtime/image.cc index 5fbb7a6f6e..b153ea0e02 100644 --- a/runtime/image.cc +++ b/runtime/image.cc @@ -25,7 +25,7 @@ namespace art { const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' }; -const uint8_t ImageHeader::kImageVersion[] = { '0', '4', '1', '\0' }; // hash-based DexCache fields +const uint8_t ImageHeader::kImageVersion[] = { '0', '4', '3', '\0' }; // hash-based DexCache fields ImageHeader::ImageHeader(uint32_t image_begin, uint32_t image_size, diff --git a/runtime/interpreter/interpreter_intrinsics.cc b/runtime/interpreter/interpreter_intrinsics.cc index 5e901cdfd8..0ae7307052 100644 --- a/runtime/interpreter/interpreter_intrinsics.cc +++ b/runtime/interpreter/interpreter_intrinsics.cc @@ -20,19 +20,29 @@ namespace art { namespace interpreter { -#define BINARY_SIMPLE_INTRINSIC(name, op, get, set, offset) \ -static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame, \ - const Instruction* inst, \ - uint16_t inst_data, \ - JValue* result_register) \ - REQUIRES_SHARED(Locks::mutator_lock_) { \ - uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \ - inst->GetVarArgs(arg, inst_data); \ - 
result_register->set(op(shadow_frame->get(arg[0]), shadow_frame->get(arg[offset]))); \ - return true; \ + +#define BINARY_INTRINSIC(name, op, get1, get2, set) \ +static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame, \ + const Instruction* inst, \ + uint16_t inst_data, \ + JValue* result_register) \ + REQUIRES_SHARED(Locks::mutator_lock_) { \ + uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \ + inst->GetVarArgs(arg, inst_data); \ + result_register->set(op(shadow_frame->get1, shadow_frame->get2)); \ + return true; \ } -#define UNARY_SIMPLE_INTRINSIC(name, op, get, set) \ +#define BINARY_II_INTRINSIC(name, op, set) \ + BINARY_INTRINSIC(name, op, GetVReg(arg[0]), GetVReg(arg[1]), set) + +#define BINARY_JJ_INTRINSIC(name, op, set) \ + BINARY_INTRINSIC(name, op, GetVRegLong(arg[0]), GetVRegLong(arg[2]), set) + +#define BINARY_JI_INTRINSIC(name, op, set) \ + BINARY_INTRINSIC(name, op, GetVRegLong(arg[0]), GetVReg(arg[2]), set) + +#define UNARY_INTRINSIC(name, op, get, set) \ static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame, \ const Instruction* inst, \ uint16_t inst_data, \ @@ -44,40 +54,277 @@ static ALWAYS_INLINE bool name(ShadowFrame* shadow_frame, \ return true; \ } + +// java.lang.Integer.reverse(I)I +UNARY_INTRINSIC(MterpIntegerReverse, ReverseBits32, GetVReg, SetI); + +// java.lang.Integer.reverseBytes(I)I +UNARY_INTRINSIC(MterpIntegerReverseBytes, BSWAP, GetVReg, SetI); + +// java.lang.Integer.bitCount(I)I +UNARY_INTRINSIC(MterpIntegerBitCount, POPCOUNT, GetVReg, SetI); + +// java.lang.Integer.compare(II)I +BINARY_II_INTRINSIC(MterpIntegerCompare, Compare, SetI); + +// java.lang.Integer.highestOneBit(I)I +UNARY_INTRINSIC(MterpIntegerHighestOneBit, HighestOneBitValue, GetVReg, SetI); + +// java.lang.Integer.LowestOneBit(I)I +UNARY_INTRINSIC(MterpIntegerLowestOneBit, LowestOneBitValue, GetVReg, SetI); + +// java.lang.Integer.numberOfLeadingZeros(I)I +UNARY_INTRINSIC(MterpIntegerNumberOfLeadingZeros, JAVASTYLE_CLZ, GetVReg, SetI); + +// 
java.lang.Integer.numberOfTrailingZeros(I)I +UNARY_INTRINSIC(MterpIntegerNumberOfTrailingZeros, JAVASTYLE_CTZ, GetVReg, SetI); + +// java.lang.Integer.rotateRight(II)I +BINARY_II_INTRINSIC(MterpIntegerRotateRight, (Rot<int32_t, false>), SetI); + +// java.lang.Integer.rotateLeft(II)I +BINARY_II_INTRINSIC(MterpIntegerRotateLeft, (Rot<int32_t, true>), SetI); + +// java.lang.Integer.signum(I)I +UNARY_INTRINSIC(MterpIntegerSignum, Signum, GetVReg, SetI); + +// java.lang.Long.reverse(I)I +UNARY_INTRINSIC(MterpLongReverse, ReverseBits64, GetVRegLong, SetJ); + +// java.lang.Long.reverseBytes(J)J +UNARY_INTRINSIC(MterpLongReverseBytes, BSWAP, GetVRegLong, SetJ); + +// java.lang.Long.bitCount(J)I +UNARY_INTRINSIC(MterpLongBitCount, POPCOUNT, GetVRegLong, SetI); + +// java.lang.Long.compare(JJ)I +BINARY_JJ_INTRINSIC(MterpLongCompare, Compare, SetI); + +// java.lang.Long.highestOneBit(J)J +UNARY_INTRINSIC(MterpLongHighestOneBit, HighestOneBitValue, GetVRegLong, SetJ); + +// java.lang.Long.lowestOneBit(J)J +UNARY_INTRINSIC(MterpLongLowestOneBit, LowestOneBitValue, GetVRegLong, SetJ); + +// java.lang.Long.numberOfLeadingZeros(J)I +UNARY_INTRINSIC(MterpLongNumberOfLeadingZeros, JAVASTYLE_CLZ, GetVRegLong, SetJ); + +// java.lang.Long.numberOfTrailingZeros(J)I +UNARY_INTRINSIC(MterpLongNumberOfTrailingZeros, JAVASTYLE_CTZ, GetVRegLong, SetJ); + +// java.lang.Long.rotateRight(JI)J +BINARY_JJ_INTRINSIC(MterpLongRotateRight, (Rot<int64_t, false>), SetJ); + +// java.lang.Long.rotateLeft(JI)J +BINARY_JJ_INTRINSIC(MterpLongRotateLeft, (Rot<int64_t, true>), SetJ); + +// java.lang.Long.signum(J)I +UNARY_INTRINSIC(MterpLongSignum, Signum, GetVRegLong, SetI); + +// java.lang.Short.reverseBytes(S)S +UNARY_INTRINSIC(MterpShortReverseBytes, BSWAP, GetVRegShort, SetS); + // java.lang.Math.min(II)I -BINARY_SIMPLE_INTRINSIC(MterpMathMinIntInt, std::min, GetVReg, SetI, 1); +BINARY_II_INTRINSIC(MterpMathMinIntInt, std::min, SetI); + // java.lang.Math.min(JJ)J 
-BINARY_SIMPLE_INTRINSIC(MterpMathMinLongLong, std::min, GetVRegLong, SetJ, 2); +BINARY_JJ_INTRINSIC(MterpMathMinLongLong, std::min, SetJ); + // java.lang.Math.max(II)I -BINARY_SIMPLE_INTRINSIC(MterpMathMaxIntInt, std::max, GetVReg, SetI, 1); +BINARY_II_INTRINSIC(MterpMathMaxIntInt, std::max, SetI); + // java.lang.Math.max(JJ)J -BINARY_SIMPLE_INTRINSIC(MterpMathMaxLongLong, std::max, GetVRegLong, SetJ, 2); +BINARY_JJ_INTRINSIC(MterpMathMaxLongLong, std::max, SetJ); + // java.lang.Math.abs(I)I -UNARY_SIMPLE_INTRINSIC(MterpMathAbsInt, std::abs, GetVReg, SetI); +UNARY_INTRINSIC(MterpMathAbsInt, std::abs, GetVReg, SetI); + // java.lang.Math.abs(J)J -UNARY_SIMPLE_INTRINSIC(MterpMathAbsLong, std::abs, GetVRegLong, SetJ); +UNARY_INTRINSIC(MterpMathAbsLong, std::abs, GetVRegLong, SetJ); + // java.lang.Math.abs(F)F -UNARY_SIMPLE_INTRINSIC(MterpMathAbsFloat, 0x7fffffff&, GetVReg, SetI); +UNARY_INTRINSIC(MterpMathAbsFloat, 0x7fffffff&, GetVReg, SetI); + // java.lang.Math.abs(D)D -UNARY_SIMPLE_INTRINSIC(MterpMathAbsDouble, INT64_C(0x7fffffffffffffff)&, GetVRegLong, SetJ); +UNARY_INTRINSIC(MterpMathAbsDouble, INT64_C(0x7fffffffffffffff)&, GetVRegLong, SetJ); + // java.lang.Math.sqrt(D)D -UNARY_SIMPLE_INTRINSIC(MterpMathSqrt, std::sqrt, GetVRegDouble, SetD); +UNARY_INTRINSIC(MterpMathSqrt, std::sqrt, GetVRegDouble, SetD); + // java.lang.Math.ceil(D)D -UNARY_SIMPLE_INTRINSIC(MterpMathCeil, std::ceil, GetVRegDouble, SetD); +UNARY_INTRINSIC(MterpMathCeil, std::ceil, GetVRegDouble, SetD); + // java.lang.Math.floor(D)D -UNARY_SIMPLE_INTRINSIC(MterpMathFloor, std::floor, GetVRegDouble, SetD); +UNARY_INTRINSIC(MterpMathFloor, std::floor, GetVRegDouble, SetD); + // java.lang.Math.sin(D)D -UNARY_SIMPLE_INTRINSIC(MterpMathSin, std::sin, GetVRegDouble, SetD); +UNARY_INTRINSIC(MterpMathSin, std::sin, GetVRegDouble, SetD); + // java.lang.Math.cos(D)D -UNARY_SIMPLE_INTRINSIC(MterpMathCos, std::cos, GetVRegDouble, SetD); +UNARY_INTRINSIC(MterpMathCos, std::cos, GetVRegDouble, SetD); + // 
java.lang.Math.tan(D)D -UNARY_SIMPLE_INTRINSIC(MterpMathTan, std::tan, GetVRegDouble, SetD); +UNARY_INTRINSIC(MterpMathTan, std::tan, GetVRegDouble, SetD); + // java.lang.Math.asin(D)D -UNARY_SIMPLE_INTRINSIC(MterpMathAsin, std::asin, GetVRegDouble, SetD); +UNARY_INTRINSIC(MterpMathAsin, std::asin, GetVRegDouble, SetD); + // java.lang.Math.acos(D)D -UNARY_SIMPLE_INTRINSIC(MterpMathAcos, std::acos, GetVRegDouble, SetD); +UNARY_INTRINSIC(MterpMathAcos, std::acos, GetVRegDouble, SetD); + // java.lang.Math.atan(D)D -UNARY_SIMPLE_INTRINSIC(MterpMathAtan, std::atan, GetVRegDouble, SetD); +UNARY_INTRINSIC(MterpMathAtan, std::atan, GetVRegDouble, SetD); + +// java.lang.String.charAt(I)C +static ALWAYS_INLINE bool MterpStringCharAt(ShadowFrame* shadow_frame, + const Instruction* inst, + uint16_t inst_data, + JValue* result_register) + REQUIRES_SHARED(Locks::mutator_lock_) { + uint32_t arg[Instruction::kMaxVarArgRegs] = {}; + inst->GetVarArgs(arg, inst_data); + mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString(); + int length = str->GetLength(); + int index = shadow_frame->GetVReg(arg[1]); + uint16_t res; + if (UNLIKELY(index < 0) || (index >= length)) { + return false; // Punt and let non-intrinsic version deal with the throw. 
+ } + if (str->IsCompressed()) { + res = str->GetValueCompressed()[index]; + } else { + res = str->GetValue()[index]; + } + result_register->SetC(res); + return true; +} + +// java.lang.String.compareTo(Ljava/lang/string)I +static ALWAYS_INLINE bool MterpStringCompareTo(ShadowFrame* shadow_frame, + const Instruction* inst, + uint16_t inst_data, + JValue* result_register) + REQUIRES_SHARED(Locks::mutator_lock_) { + uint32_t arg[Instruction::kMaxVarArgRegs] = {}; + inst->GetVarArgs(arg, inst_data); + mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString(); + mirror::Object* arg1 = shadow_frame->GetVRegReference(arg[1]); + if (arg1 == nullptr) { + return false; + } + result_register->SetI(str->CompareTo(arg1->AsString())); + return true; +} + +#define STRING_INDEXOF_INTRINSIC(name, starting_pos) \ +static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \ + const Instruction* inst, \ + uint16_t inst_data, \ + JValue* result_register) \ + REQUIRES_SHARED(Locks::mutator_lock_) { \ + uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \ + inst->GetVarArgs(arg, inst_data); \ + mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString(); \ + int ch = shadow_frame->GetVReg(arg[1]); \ + if (ch >= 0x10000) { \ + /* Punt if supplementary char. 
*/ \ + return false; \ + } \ + result_register->SetI(str->FastIndexOf(ch, starting_pos)); \ + return true; \ +} + +// java.lang.String.indexOf(I)I +STRING_INDEXOF_INTRINSIC(StringIndexOf, 0); + +// java.lang.String.indexOf(II)I +STRING_INDEXOF_INTRINSIC(StringIndexOfAfter, shadow_frame->GetVReg(arg[2])); + +#define SIMPLE_STRING_INTRINSIC(name, operation) \ +static ALWAYS_INLINE bool Mterp##name(ShadowFrame* shadow_frame, \ + const Instruction* inst, \ + uint16_t inst_data, \ + JValue* result_register) \ + REQUIRES_SHARED(Locks::mutator_lock_) { \ + uint32_t arg[Instruction::kMaxVarArgRegs] = {}; \ + inst->GetVarArgs(arg, inst_data); \ + mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString(); \ + result_register->operation; \ + return true; \ +} + +// java.lang.String.isEmpty()Z +SIMPLE_STRING_INTRINSIC(StringIsEmpty, SetZ(str->GetLength() == 0)) + +// java.lang.String.length()I +SIMPLE_STRING_INTRINSIC(StringLength, SetI(str->GetLength())) + +// java.lang.String.getCharsNoCheck(II[CI)V +static ALWAYS_INLINE bool MterpStringGetCharsNoCheck(ShadowFrame* shadow_frame, + const Instruction* inst, + uint16_t inst_data, + JValue* result_register ATTRIBUTE_UNUSED) + REQUIRES_SHARED(Locks::mutator_lock_) { + // Start, end & index already checked by caller - won't throw. Destination is uncompressed. 
+ uint32_t arg[Instruction::kMaxVarArgRegs] = {}; + inst->GetVarArgs(arg, inst_data); + mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString(); + int32_t start = shadow_frame->GetVReg(arg[1]); + int32_t end = shadow_frame->GetVReg(arg[2]); + int32_t index = shadow_frame->GetVReg(arg[4]); + mirror::CharArray* array = shadow_frame->GetVRegReference(arg[3])->AsCharArray(); + uint16_t* dst = array->GetData() + index; + int32_t len = (end - start); + if (str->IsCompressed()) { + const uint8_t* src_8 = str->GetValueCompressed() + start; + for (int i = 0; i < len; i++) { + dst[i] = src_8[i]; + } + } else { + uint16_t* src_16 = str->GetValue() + start; + memcpy(dst, src_16, len * sizeof(uint16_t)); + } + return true; +} + +// java.lang.String.equalsLjava/lang/Object;)Z +static ALWAYS_INLINE bool MterpStringEquals(ShadowFrame* shadow_frame, + const Instruction* inst, + uint16_t inst_data, + JValue* result_register) + REQUIRES_SHARED(Locks::mutator_lock_) { + uint32_t arg[Instruction::kMaxVarArgRegs] = {}; + inst->GetVarArgs(arg, inst_data); + mirror::String* str = shadow_frame->GetVRegReference(arg[0])->AsString(); + mirror::Object* obj = shadow_frame->GetVRegReference(arg[1]); + bool res = false; // Assume not equal. + if ((obj != nullptr) && obj->IsString()) { + mirror::String* str2 = obj->AsString(); + if (str->GetCount() == str2->GetCount()) { + // Length & compression status are same. Can use block compare. + void* bytes1; + void* bytes2; + int len = str->GetLength(); + if (str->IsCompressed()) { + bytes1 = str->GetValueCompressed(); + bytes2 = str2->GetValueCompressed(); + } else { + len *= sizeof(uint16_t); + bytes1 = str->GetValue(); + bytes2 = str2->GetValue(); + } + res = (memcmp(bytes1, bytes2, len) == 0); + } + } + result_register->SetZ(res); + return true; +} + +// Macro to help keep track of what's left to implement. 
+#define UNIMPLEMENTED_CASE(name) \ + case Intrinsics::k##name: \ + res = false; \ + break; #define INTRINSIC_CASE(name) \ case Intrinsics::k##name: \ @@ -93,26 +340,136 @@ bool MterpHandleIntrinsic(ShadowFrame* shadow_frame, Intrinsics intrinsic = static_cast<Intrinsics>(called_method->GetIntrinsic()); bool res = false; // Assume failure switch (intrinsic) { - INTRINSIC_CASE(MathMinIntInt) + UNIMPLEMENTED_CASE(DoubleDoubleToRawLongBits /* (D)J */) + UNIMPLEMENTED_CASE(DoubleDoubleToLongBits /* (D)J */) + UNIMPLEMENTED_CASE(DoubleIsInfinite /* (D)Z */) + UNIMPLEMENTED_CASE(DoubleIsNaN /* (D)Z */) + UNIMPLEMENTED_CASE(DoubleLongBitsToDouble /* (J)D */) + UNIMPLEMENTED_CASE(FloatFloatToRawIntBits /* (F)I */) + UNIMPLEMENTED_CASE(FloatFloatToIntBits /* (F)I */) + UNIMPLEMENTED_CASE(FloatIsInfinite /* (F)Z */) + UNIMPLEMENTED_CASE(FloatIsNaN /* (F)Z */) + UNIMPLEMENTED_CASE(FloatIntBitsToFloat /* (I)F */) + INTRINSIC_CASE(IntegerReverse) + INTRINSIC_CASE(IntegerReverseBytes) + INTRINSIC_CASE(IntegerBitCount) + INTRINSIC_CASE(IntegerCompare) + INTRINSIC_CASE(IntegerHighestOneBit) + INTRINSIC_CASE(IntegerLowestOneBit) + INTRINSIC_CASE(IntegerNumberOfLeadingZeros) + INTRINSIC_CASE(IntegerNumberOfTrailingZeros) + INTRINSIC_CASE(IntegerRotateRight) + INTRINSIC_CASE(IntegerRotateLeft) + INTRINSIC_CASE(IntegerSignum) + INTRINSIC_CASE(LongReverse) + INTRINSIC_CASE(LongReverseBytes) + INTRINSIC_CASE(LongBitCount) + INTRINSIC_CASE(LongCompare) + INTRINSIC_CASE(LongHighestOneBit) + INTRINSIC_CASE(LongLowestOneBit) + INTRINSIC_CASE(LongNumberOfLeadingZeros) + INTRINSIC_CASE(LongNumberOfTrailingZeros) + INTRINSIC_CASE(LongRotateRight) + INTRINSIC_CASE(LongRotateLeft) + INTRINSIC_CASE(LongSignum) + INTRINSIC_CASE(ShortReverseBytes) + INTRINSIC_CASE(MathAbsDouble) + INTRINSIC_CASE(MathAbsFloat) + INTRINSIC_CASE(MathAbsLong) + INTRINSIC_CASE(MathAbsInt) + UNIMPLEMENTED_CASE(MathMinDoubleDouble /* (DD)D */) + UNIMPLEMENTED_CASE(MathMinFloatFloat /* (FF)F */) 
INTRINSIC_CASE(MathMinLongLong) - INTRINSIC_CASE(MathMaxIntInt) + INTRINSIC_CASE(MathMinIntInt) + UNIMPLEMENTED_CASE(MathMaxDoubleDouble /* (DD)D */) + UNIMPLEMENTED_CASE(MathMaxFloatFloat /* (FF)F */) INTRINSIC_CASE(MathMaxLongLong) - INTRINSIC_CASE(MathAbsInt) - INTRINSIC_CASE(MathAbsLong) - INTRINSIC_CASE(MathAbsFloat) - INTRINSIC_CASE(MathAbsDouble) - INTRINSIC_CASE(MathSqrt) - INTRINSIC_CASE(MathCeil) - INTRINSIC_CASE(MathFloor) - INTRINSIC_CASE(MathSin) + INTRINSIC_CASE(MathMaxIntInt) INTRINSIC_CASE(MathCos) - INTRINSIC_CASE(MathTan) - INTRINSIC_CASE(MathAsin) + INTRINSIC_CASE(MathSin) INTRINSIC_CASE(MathAcos) + INTRINSIC_CASE(MathAsin) INTRINSIC_CASE(MathAtan) - default: - res = false; // Punt + UNIMPLEMENTED_CASE(MathAtan2 /* (DD)D */) + UNIMPLEMENTED_CASE(MathCbrt /* (D)D */) + UNIMPLEMENTED_CASE(MathCosh /* (D)D */) + UNIMPLEMENTED_CASE(MathExp /* (D)D */) + UNIMPLEMENTED_CASE(MathExpm1 /* (D)D */) + UNIMPLEMENTED_CASE(MathHypot /* (DD)D */) + UNIMPLEMENTED_CASE(MathLog /* (D)D */) + UNIMPLEMENTED_CASE(MathLog10 /* (D)D */) + UNIMPLEMENTED_CASE(MathNextAfter /* (DD)D */) + UNIMPLEMENTED_CASE(MathSinh /* (D)D */) + INTRINSIC_CASE(MathTan) + UNIMPLEMENTED_CASE(MathTanh /* (D)D */) + INTRINSIC_CASE(MathSqrt) + INTRINSIC_CASE(MathCeil) + INTRINSIC_CASE(MathFloor) + UNIMPLEMENTED_CASE(MathRint /* (D)D */) + UNIMPLEMENTED_CASE(MathRoundDouble /* (D)J */) + UNIMPLEMENTED_CASE(MathRoundFloat /* (F)I */) + UNIMPLEMENTED_CASE(SystemArrayCopyChar /* ([CI[CII)V */) + UNIMPLEMENTED_CASE(SystemArrayCopy /* (Ljava/lang/Object;ILjava/lang/Object;II)V */) + UNIMPLEMENTED_CASE(ThreadCurrentThread /* ()Ljava/lang/Thread; */) + UNIMPLEMENTED_CASE(MemoryPeekByte /* (J)B */) + UNIMPLEMENTED_CASE(MemoryPeekIntNative /* (J)I */) + UNIMPLEMENTED_CASE(MemoryPeekLongNative /* (J)J */) + UNIMPLEMENTED_CASE(MemoryPeekShortNative /* (J)S */) + UNIMPLEMENTED_CASE(MemoryPokeByte /* (JB)V */) + UNIMPLEMENTED_CASE(MemoryPokeIntNative /* (JI)V */) + UNIMPLEMENTED_CASE(MemoryPokeLongNative 
/* (JJ)V */) + UNIMPLEMENTED_CASE(MemoryPokeShortNative /* (JS)V */) + INTRINSIC_CASE(StringCharAt) + INTRINSIC_CASE(StringCompareTo) + INTRINSIC_CASE(StringEquals) + INTRINSIC_CASE(StringGetCharsNoCheck) + INTRINSIC_CASE(StringIndexOf) + INTRINSIC_CASE(StringIndexOfAfter) + UNIMPLEMENTED_CASE(StringStringIndexOf /* (Ljava/lang/String;)I */) + UNIMPLEMENTED_CASE(StringStringIndexOfAfter /* (Ljava/lang/String;I)I */) + INTRINSIC_CASE(StringIsEmpty) + INTRINSIC_CASE(StringLength) + UNIMPLEMENTED_CASE(StringNewStringFromBytes /* ([BIII)Ljava/lang/String; */) + UNIMPLEMENTED_CASE(StringNewStringFromChars /* (II[C)Ljava/lang/String; */) + UNIMPLEMENTED_CASE(StringNewStringFromString /* (Ljava/lang/String;)Ljava/lang/String; */) + UNIMPLEMENTED_CASE(StringBufferAppend /* (Ljava/lang/String;)Ljava/lang/StringBuffer; */) + UNIMPLEMENTED_CASE(StringBufferLength /* ()I */) + UNIMPLEMENTED_CASE(StringBufferToString /* ()Ljava/lang/String; */) + UNIMPLEMENTED_CASE(StringBuilderAppend /* (Ljava/lang/String;)Ljava/lang/StringBuilder; */) + UNIMPLEMENTED_CASE(StringBuilderLength /* ()I */) + UNIMPLEMENTED_CASE(StringBuilderToString /* ()Ljava/lang/String; */) + UNIMPLEMENTED_CASE(UnsafeCASInt /* (Ljava/lang/Object;JII)Z */) + UNIMPLEMENTED_CASE(UnsafeCASLong /* (Ljava/lang/Object;JJJ)Z */) + UNIMPLEMENTED_CASE(UnsafeCASObject /* (Ljava/lang/Object;JLjava/lang/Object;Ljava/lang/Object;)Z */) + UNIMPLEMENTED_CASE(UnsafeGet /* (Ljava/lang/Object;J)I */) + UNIMPLEMENTED_CASE(UnsafeGetVolatile /* (Ljava/lang/Object;J)I */) + UNIMPLEMENTED_CASE(UnsafeGetObject /* (Ljava/lang/Object;J)Ljava/lang/Object; */) + UNIMPLEMENTED_CASE(UnsafeGetObjectVolatile /* (Ljava/lang/Object;J)Ljava/lang/Object; */) + UNIMPLEMENTED_CASE(UnsafeGetLong /* (Ljava/lang/Object;J)J */) + UNIMPLEMENTED_CASE(UnsafeGetLongVolatile /* (Ljava/lang/Object;J)J */) + UNIMPLEMENTED_CASE(UnsafePut /* (Ljava/lang/Object;JI)V */) + UNIMPLEMENTED_CASE(UnsafePutOrdered /* (Ljava/lang/Object;JI)V */) + 
UNIMPLEMENTED_CASE(UnsafePutVolatile /* (Ljava/lang/Object;JI)V */) + UNIMPLEMENTED_CASE(UnsafePutObject /* (Ljava/lang/Object;JLjava/lang/Object;)V */) + UNIMPLEMENTED_CASE(UnsafePutObjectOrdered /* (Ljava/lang/Object;JLjava/lang/Object;)V */) + UNIMPLEMENTED_CASE(UnsafePutObjectVolatile /* (Ljava/lang/Object;JLjava/lang/Object;)V */) + UNIMPLEMENTED_CASE(UnsafePutLong /* (Ljava/lang/Object;JJ)V */) + UNIMPLEMENTED_CASE(UnsafePutLongOrdered /* (Ljava/lang/Object;JJ)V */) + UNIMPLEMENTED_CASE(UnsafePutLongVolatile /* (Ljava/lang/Object;JJ)V */) + UNIMPLEMENTED_CASE(UnsafeGetAndAddInt /* (Ljava/lang/Object;JI)I */) + UNIMPLEMENTED_CASE(UnsafeGetAndAddLong /* (Ljava/lang/Object;JJ)J */) + UNIMPLEMENTED_CASE(UnsafeGetAndSetInt /* (Ljava/lang/Object;JI)I */) + UNIMPLEMENTED_CASE(UnsafeGetAndSetLong /* (Ljava/lang/Object;JJ)J */) + UNIMPLEMENTED_CASE(UnsafeGetAndSetObject /* (Ljava/lang/Object;JLjava/lang/Object;)Ljava/lang/Object; */) + UNIMPLEMENTED_CASE(UnsafeLoadFence /* ()V */) + UNIMPLEMENTED_CASE(UnsafeStoreFence /* ()V */) + UNIMPLEMENTED_CASE(UnsafeFullFence /* ()V */) + UNIMPLEMENTED_CASE(ReferenceGetReferent /* ()Ljava/lang/Object; */) + UNIMPLEMENTED_CASE(IntegerValueOf /* (I)Ljava/lang/Integer; */) + case Intrinsics::kNone: + res = false; break; + // Note: no default case to ensure we catch any newly added intrinsics. } return res; } diff --git a/runtime/interpreter/unstarted_runtime.cc b/runtime/interpreter/unstarted_runtime.cc index eb0a9d161a..70be30c22c 100644 --- a/runtime/interpreter/unstarted_runtime.cc +++ b/runtime/interpreter/unstarted_runtime.cc @@ -1131,53 +1131,6 @@ void UnstartedRuntime::UnstartedDoubleDoubleToRawLongBits( result->SetJ(bit_cast<int64_t, double>(in)); } -static ObjPtr<mirror::Object> GetDexFromDexCache(Thread* self, mirror::DexCache* dex_cache) - REQUIRES_SHARED(Locks::mutator_lock_) { - const DexFile* dex_file = dex_cache->GetDexFile(); - if (dex_file == nullptr) { - return nullptr; - } - - // Create the direct byte buffer. 
- JNIEnv* env = self->GetJniEnv(); - DCHECK(env != nullptr); - void* address = const_cast<void*>(reinterpret_cast<const void*>(dex_file->Begin())); - ScopedLocalRef<jobject> byte_buffer(env, env->NewDirectByteBuffer(address, dex_file->Size())); - if (byte_buffer.get() == nullptr) { - DCHECK(self->IsExceptionPending()); - return nullptr; - } - - jvalue args[1]; - args[0].l = byte_buffer.get(); - - ScopedLocalRef<jobject> dex(env, env->CallStaticObjectMethodA( - WellKnownClasses::com_android_dex_Dex, - WellKnownClasses::com_android_dex_Dex_create, - args)); - - return self->DecodeJObject(dex.get()); -} - -void UnstartedRuntime::UnstartedDexCacheGetDexNative( - Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { - // We will create the Dex object, but the image writer will release it before creating the - // art file. - mirror::Object* src = shadow_frame->GetVRegReference(arg_offset); - bool have_dex = false; - if (src != nullptr) { - ObjPtr<mirror::Object> dex = GetDexFromDexCache(self, src->AsDexCache()); - if (dex != nullptr) { - have_dex = true; - result->SetL(dex); - } - } - if (!have_dex) { - self->ClearException(); - Runtime::Current()->AbortTransactionAndThrowAbortError(self, "Could not create Dex object"); - } -} - static void UnstartedMemoryPeek( Primitive::Type type, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { int64_t address = shadow_frame->GetVRegLong(arg_offset); @@ -1336,12 +1289,14 @@ void UnstartedRuntime::UnstartedStringDoReplace( Thread* self, ShadowFrame* shadow_frame, JValue* result, size_t arg_offset) { jchar old_c = shadow_frame->GetVReg(arg_offset + 1); jchar new_c = shadow_frame->GetVReg(arg_offset + 2); - ObjPtr<mirror::String> string = shadow_frame->GetVRegReference(arg_offset)->AsString(); + StackHandleScope<1> hs(self); + Handle<mirror::String> string = + hs.NewHandle(shadow_frame->GetVRegReference(arg_offset)->AsString()); if (string == nullptr) { AbortTransactionOrFail(self, 
"String.replaceWithMatch with null object"); return; } - result->SetL(string->DoReplace(self, old_c, new_c)); + result->SetL(mirror::String::DoReplace(self, string, old_c, new_c)); } // This allows creating the new style of String objects during compilation. diff --git a/runtime/interpreter/unstarted_runtime_list.h b/runtime/interpreter/unstarted_runtime_list.h index 2560a92f8f..47910357d5 100644 --- a/runtime/interpreter/unstarted_runtime_list.h +++ b/runtime/interpreter/unstarted_runtime_list.h @@ -52,7 +52,6 @@ V(MathPow, "double java.lang.Math.pow(double, double)") \ V(ObjectHashCode, "int java.lang.Object.hashCode()") \ V(DoubleDoubleToRawLongBits, "long java.lang.Double.doubleToRawLongBits(double)") \ - V(DexCacheGetDexNative, "com.android.dex.Dex java.lang.DexCache.getDexNative()") \ V(MemoryPeekByte, "byte libcore.io.Memory.peekByte(long)") \ V(MemoryPeekShort, "short libcore.io.Memory.peekShortNative(long)") \ V(MemoryPeekInt, "int libcore.io.Memory.peekIntNative(long)") \ diff --git a/runtime/jdwp/jdwp_adb.cc b/runtime/jdwp/jdwp_adb.cc index b13d565ec2..0aa04c10ca 100644 --- a/runtime/jdwp/jdwp_adb.cc +++ b/runtime/jdwp/jdwp_adb.cc @@ -227,7 +227,7 @@ bool JdwpAdbState::Accept() { const int sleep_max_ms = 2*1000; char buff[5]; - int sock = socket(PF_UNIX, SOCK_STREAM, 0); + int sock = socket(AF_UNIX, SOCK_SEQPACKET, 0); if (sock < 0) { PLOG(ERROR) << "Could not create ADB control socket"; return false; @@ -264,7 +264,7 @@ bool JdwpAdbState::Accept() { * up after a few minutes in case somebody ships an app with * the debuggable flag set. 
*/ - int ret = connect(ControlSock(), &control_addr_.controlAddrPlain, control_addr_len_); + int ret = connect(ControlSock(), &control_addr_.controlAddrPlain, control_addr_len_); if (!ret) { int control_sock = ControlSock(); #ifdef ART_TARGET_ANDROID @@ -278,7 +278,7 @@ bool JdwpAdbState::Accept() { /* now try to send our pid to the ADB daemon */ ret = TEMP_FAILURE_RETRY(send(control_sock, buff, 4, 0)); - if (ret >= 0) { + if (ret == 4) { VLOG(jdwp) << StringPrintf("PID sent as '%.*s' to ADB", 4, buff); break; } diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc index e7b23dcfa0..fc41f94f97 100644 --- a/runtime/jit/jit_code_cache.cc +++ b/runtime/jit/jit_code_cache.cc @@ -211,6 +211,7 @@ class ScopedCodeCacheWrite : ScopedTrace { uint8_t* JitCodeCache::CommitCode(Thread* self, ArtMethod* method, uint8_t* stack_map, + uint8_t* method_info, uint8_t* roots_data, size_t frame_size_in_bytes, size_t core_spill_mask, @@ -225,6 +226,7 @@ uint8_t* JitCodeCache::CommitCode(Thread* self, uint8_t* result = CommitCodeInternal(self, method, stack_map, + method_info, roots_data, frame_size_in_bytes, core_spill_mask, @@ -242,6 +244,7 @@ uint8_t* JitCodeCache::CommitCode(Thread* self, result = CommitCodeInternal(self, method, stack_map, + method_info, roots_data, frame_size_in_bytes, core_spill_mask, @@ -510,6 +513,7 @@ void JitCodeCache::CopyInlineCacheInto(const InlineCache& ic, uint8_t* JitCodeCache::CommitCodeInternal(Thread* self, ArtMethod* method, uint8_t* stack_map, + uint8_t* method_info, uint8_t* roots_data, size_t frame_size_in_bytes, size_t core_spill_mask, @@ -547,6 +551,7 @@ uint8_t* JitCodeCache::CommitCodeInternal(Thread* self, method_header = OatQuickMethodHeader::FromCodePointer(code_ptr); new (method_header) OatQuickMethodHeader( code_ptr - stack_map, + code_ptr - method_info, frame_size_in_bytes, core_spill_mask, fp_spill_mask, @@ -739,12 +744,14 @@ void JitCodeCache::ClearData(Thread* self, size_t JitCodeCache::ReserveData(Thread* self, 
size_t stack_map_size, + size_t method_info_size, size_t number_of_roots, ArtMethod* method, uint8_t** stack_map_data, + uint8_t** method_info_data, uint8_t** roots_data) { size_t table_size = ComputeRootTableSize(number_of_roots); - size_t size = RoundUp(stack_map_size + table_size, sizeof(void*)); + size_t size = RoundUp(stack_map_size + method_info_size + table_size, sizeof(void*)); uint8_t* result = nullptr; { @@ -774,11 +781,13 @@ size_t JitCodeCache::ReserveData(Thread* self, if (result != nullptr) { *roots_data = result; *stack_map_data = result + table_size; + *method_info_data = *stack_map_data + stack_map_size; FillRootTableLength(*roots_data, number_of_roots); return size; } else { *roots_data = nullptr; *stack_map_data = nullptr; + *method_info_data = nullptr; return 0; } } diff --git a/runtime/jit/jit_code_cache.h b/runtime/jit/jit_code_cache.h index c970979eaa..db214e7983 100644 --- a/runtime/jit/jit_code_cache.h +++ b/runtime/jit/jit_code_cache.h @@ -105,6 +105,7 @@ class JitCodeCache { uint8_t* CommitCode(Thread* self, ArtMethod* method, uint8_t* stack_map, + uint8_t* method_info, uint8_t* roots_data, size_t frame_size_in_bytes, size_t core_spill_mask, @@ -129,10 +130,12 @@ class JitCodeCache { // for storing `number_of_roots` roots. Returns null if there is no more room. // Return the number of bytes allocated. 
size_t ReserveData(Thread* self, - size_t size, + size_t stack_map_size, + size_t method_info_size, size_t number_of_roots, ArtMethod* method, uint8_t** stack_map_data, + uint8_t** method_info_data, uint8_t** roots_data) REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!lock_); @@ -249,6 +252,7 @@ class JitCodeCache { uint8_t* CommitCodeInternal(Thread* self, ArtMethod* method, uint8_t* stack_map, + uint8_t* method_info, uint8_t* roots_data, size_t frame_size_in_bytes, size_t core_spill_mask, diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc index 00487c6728..e2bd1cbc33 100644 --- a/runtime/jit/profile_saver.cc +++ b/runtime/jit/profile_saver.cc @@ -42,8 +42,6 @@ ProfileSaver::ProfileSaver(const ProfileSaverOptions& options, const std::vector<std::string>& code_paths) : jit_code_cache_(jit_code_cache), shutting_down_(false), - last_save_number_of_methods_(0), - last_save_number_of_classes_(0), last_time_ns_saver_woke_up_(0), jit_activity_notifications_(0), wait_lock_("ProfileSaver wait lock"), @@ -123,15 +121,16 @@ void ProfileSaver::Run() { break; } - uint16_t new_methods = 0; + uint16_t number_of_new_methods = 0; uint64_t start_work = NanoTime(); - bool profile_saved_to_disk = ProcessProfilingInfo(&new_methods); + bool profile_saved_to_disk = ProcessProfilingInfo(/*force_save*/false, &number_of_new_methods); // Update the notification counter based on result. Note that there might be contention on this // but we don't care about to be 100% precise. if (!profile_saved_to_disk) { // If we didn't save to disk it may be because we didn't have enough new methods. - // Set the jit activity notifications to new_methods so we can wake up earlier if needed. - jit_activity_notifications_ = new_methods; + // Set the jit activity notifications to number_of_new_methods so we can wake up earlier + // if needed. 
+ jit_activity_notifications_ = number_of_new_methods; } total_ns_of_work_ += NanoTime() - start_work; } @@ -171,10 +170,10 @@ void ProfileSaver::NotifyJitActivityInternal() { } } -ProfileCompilationInfo* ProfileSaver::GetCachedProfiledInfo(const std::string& filename) { +ProfileSaver::ProfileInfoCache* ProfileSaver::GetCachedProfiledInfo(const std::string& filename) { auto info_it = profile_cache_.find(filename); if (info_it == profile_cache_.end()) { - info_it = profile_cache_.Put(filename, ProfileCompilationInfo()); + info_it = profile_cache_.Put(filename, ProfileInfoCache()); } return &info_it->second; } @@ -248,8 +247,9 @@ void ProfileSaver::FetchAndCacheResolvedClassesAndMethods() { << " (" << classes.GetDexLocation() << ")"; } } - ProfileCompilationInfo* info = GetCachedProfiledInfo(filename); - info->AddMethodsAndClasses(profile_methods_for_location, resolved_classes_for_location); + ProfileInfoCache* cached_info = GetCachedProfiledInfo(filename); + cached_info->profile.AddMethodsAndClasses(profile_methods_for_location, + resolved_classes_for_location); total_number_of_profile_entries_cached += resolved_classes_for_location.size(); } max_number_of_profile_entries_cached_ = std::max( @@ -257,7 +257,7 @@ void ProfileSaver::FetchAndCacheResolvedClassesAndMethods() { total_number_of_profile_entries_cached); } -bool ProfileSaver::ProcessProfilingInfo(uint16_t* new_methods) { +bool ProfileSaver::ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number_of_new_methods) { ScopedTrace trace(__PRETTY_FUNCTION__); SafeMap<std::string, std::set<std::string>> tracked_locations; { @@ -268,10 +268,16 @@ bool ProfileSaver::ProcessProfilingInfo(uint16_t* new_methods) { bool profile_file_saved = false; uint64_t total_number_of_profile_entries_cached = 0; - *new_methods = 0; + if (number_of_new_methods != nullptr) { + *number_of_new_methods = 0; + } for (const auto& it : tracked_locations) { - if (ShuttingDown(Thread::Current())) { + if (!force_save && 
ShuttingDown(Thread::Current())) { + // The ProfileSaver is in shutdown mode, meaning a stop request was made and + // we need to exit cleanly (by waiting for the saver thread to finish). Unless + // we have a request for a forced save, do not do any processing so that we + // speed up the exit. return true; } const std::string& filename = it.first; @@ -283,16 +289,18 @@ bool ProfileSaver::ProcessProfilingInfo(uint16_t* new_methods) { total_number_of_code_cache_queries_++; } - ProfileCompilationInfo* cached_info = GetCachedProfiledInfo(filename); - cached_info->AddMethodsAndClasses(profile_methods, std::set<DexCacheResolvedClasses>()); + ProfileInfoCache* cached_info = GetCachedProfiledInfo(filename); + ProfileCompilationInfo* cached_profile = &cached_info->profile; + cached_profile->AddMethodsAndClasses(profile_methods, std::set<DexCacheResolvedClasses>()); int64_t delta_number_of_methods = - cached_info->GetNumberOfMethods() - - static_cast<int64_t>(last_save_number_of_methods_); + cached_profile->GetNumberOfMethods() - + static_cast<int64_t>(cached_info->last_save_number_of_methods); int64_t delta_number_of_classes = - cached_info->GetNumberOfResolvedClasses() - - static_cast<int64_t>(last_save_number_of_classes_); + cached_profile->GetNumberOfResolvedClasses() - + static_cast<int64_t>(cached_info->last_save_number_of_classes); - if (delta_number_of_methods < options_.GetMinMethodsToSave() && + if (!force_save && + delta_number_of_methods < options_.GetMinMethodsToSave() && delta_number_of_classes < options_.GetMinClassesToSave()) { VLOG(profiler) << "Not enough information to save to: " << filename << " Number of methods: " << delta_number_of_methods @@ -300,16 +308,19 @@ bool ProfileSaver::ProcessProfilingInfo(uint16_t* new_methods) { total_number_of_skipped_writes_++; continue; } - *new_methods = std::max(static_cast<uint16_t>(delta_number_of_methods), *new_methods); + if (number_of_new_methods != nullptr) { + *number_of_new_methods = 
std::max(static_cast<uint16_t>(delta_number_of_methods), + *number_of_new_methods); + } uint64_t bytes_written; // Force the save. In case the profile data is corrupted or the the profile // has the wrong version this will "fix" the file to the correct format. - if (cached_info->MergeAndSave(filename, &bytes_written, /*force*/ true)) { - last_save_number_of_methods_ = cached_info->GetNumberOfMethods(); - last_save_number_of_classes_ = cached_info->GetNumberOfResolvedClasses(); + if (cached_profile->MergeAndSave(filename, &bytes_written, /*force*/ true)) { + cached_info->last_save_number_of_methods = cached_profile->GetNumberOfMethods(); + cached_info->last_save_number_of_classes = cached_profile->GetNumberOfResolvedClasses(); // Clear resolved classes. No need to store them around as // they don't change after the first write. - cached_info->ClearResolvedClasses(); + cached_profile->ClearResolvedClasses(); if (bytes_written > 0) { total_number_of_writes_++; total_bytes_written_ += bytes_written; @@ -326,8 +337,8 @@ bool ProfileSaver::ProcessProfilingInfo(uint16_t* new_methods) { total_number_of_failed_writes_++; } total_number_of_profile_entries_cached += - cached_info->GetNumberOfMethods() + - cached_info->GetNumberOfResolvedClasses(); + cached_profile->GetNumberOfMethods() + + cached_profile->GetNumberOfResolvedClasses(); } max_number_of_profile_entries_cached_ = std::max( max_number_of_profile_entries_cached_, @@ -454,6 +465,9 @@ void ProfileSaver::Stop(bool dump_info) { // Wait for the saver thread to stop. CHECK_PTHREAD_CALL(pthread_join, (profiler_pthread, nullptr), "profile saver thread shutdown"); + // Force save everything before destroying the instance. 
+ instance_->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr); + { MutexLock profiler_mutex(Thread::Current(), *Locks::profiler_lock_); instance_ = nullptr; @@ -516,8 +530,7 @@ void ProfileSaver::ForceProcessProfiles() { // but we only use this in testing when we now this won't happen. // Refactor the way we handle the instance so that we don't end up in this situation. if (saver != nullptr) { - uint16_t new_methods; - saver->ProcessProfilingInfo(&new_methods); + saver->ProcessProfilingInfo(/*force_save*/true, /*number_of_new_methods*/nullptr); } } @@ -526,10 +539,8 @@ bool ProfileSaver::HasSeenMethod(const std::string& profile, uint16_t method_idx) { MutexLock mu(Thread::Current(), *Locks::profiler_lock_); if (instance_ != nullptr) { - ProfileCompilationInfo* info = instance_->GetCachedProfiledInfo(profile); - if (info != nullptr) { - return info->ContainsMethod(MethodReference(dex_file, method_idx)); - } + const ProfileCompilationInfo& info = instance_->GetCachedProfiledInfo(profile)->profile; + return info.ContainsMethod(MethodReference(dex_file, method_idx)); } return false; } diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h index ec8342ad9e..4dd8e60ee4 100644 --- a/runtime/jit/profile_saver.h +++ b/runtime/jit/profile_saver.h @@ -59,6 +59,14 @@ class ProfileSaver { uint16_t method_idx); private: + // A cache structure which keeps track of the data saved to disk. + // It is used to reduce the number of disk read/writes. + struct ProfileInfoCache { + ProfileCompilationInfo profile; + uint32_t last_save_number_of_methods = 0; + uint32_t last_save_number_of_classes = 0; + }; + ProfileSaver(const ProfileSaverOptions& options, const std::string& output_filename, jit::JitCodeCache* jit_code_cache, @@ -71,9 +79,14 @@ class ProfileSaver { // The run loop for the saver. 
void Run() REQUIRES(!Locks::profiler_lock_, !wait_lock_); + // Processes the existing profiling info from the jit code cache and returns // true if it needed to be saved to disk. - bool ProcessProfilingInfo(uint16_t* new_methods) + // If number_of_new_methods is not null, after the call it will contain the number of new methods + // written to disk. + // If force_save is true, the saver will ignore any constraints which limit IO (e.g. will write + // the profile to disk even if it's just one new method). + bool ProcessProfilingInfo(bool force_save, /*out*/uint16_t* number_of_new_methods) REQUIRES(!Locks::profiler_lock_) REQUIRES(!Locks::mutator_lock_); @@ -90,7 +103,7 @@ class ProfileSaver { // Retrieves the cached profile compilation info for the given profile file. // If no entry exists, a new empty one will be created, added to the cache and // then returned. - ProfileCompilationInfo* GetCachedProfiledInfo(const std::string& filename); + ProfileInfoCache* GetCachedProfiledInfo(const std::string& filename); // Fetches the current resolved classes and methods from the ClassLinker and stores them in the // profile_cache_ for later save. void FetchAndCacheResolvedClassesAndMethods(); @@ -110,8 +123,6 @@ class ProfileSaver { GUARDED_BY(Locks::profiler_lock_); bool shutting_down_ GUARDED_BY(Locks::profiler_lock_); - uint32_t last_save_number_of_methods_; - uint32_t last_save_number_of_classes_; uint64_t last_time_ns_saver_woke_up_ GUARDED_BY(wait_lock_); uint32_t jit_activity_notifications_; @@ -119,7 +130,7 @@ class ProfileSaver { // profile information. The size of this cache is usually very small and tops // to just a few hundreds entries in the ProfileCompilationInfo objects. // It helps avoiding unnecessary writes to disk. - SafeMap<std::string, ProfileCompilationInfo> profile_cache_; + SafeMap<std::string, ProfileInfoCache> profile_cache_; // Save period condition support. 
Mutex wait_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; diff --git a/runtime/jit/profile_saver_options.h b/runtime/jit/profile_saver_options.h index a6385d7469..c8d256fec0 100644 --- a/runtime/jit/profile_saver_options.h +++ b/runtime/jit/profile_saver_options.h @@ -21,7 +21,7 @@ namespace art { struct ProfileSaverOptions { public: static constexpr uint32_t kMinSavePeriodMs = 20 * 1000; // 20 seconds - static constexpr uint32_t kSaveResolvedClassesDelayMs = 2 * 1000; // 2 seconds + static constexpr uint32_t kSaveResolvedClassesDelayMs = 5 * 1000; // 5 seconds // Minimum number of JIT samples during launch to include a method into the profile. static constexpr uint32_t kStartupMethodSamples = 1; static constexpr uint32_t kMinMethodsToSave = 10; @@ -37,7 +37,8 @@ struct ProfileSaverOptions { min_methods_to_save_(kMinMethodsToSave), min_classes_to_save_(kMinClassesToSave), min_notification_before_wake_(kMinNotificationBeforeWake), - max_notification_before_wake_(kMaxNotificationBeforeWake) {} + max_notification_before_wake_(kMaxNotificationBeforeWake), + profile_path_("") {} ProfileSaverOptions( bool enabled, @@ -47,7 +48,8 @@ struct ProfileSaverOptions { uint32_t min_methods_to_save, uint32_t min_classes_to_save, uint32_t min_notification_before_wake, - uint32_t max_notification_before_wake): + uint32_t max_notification_before_wake, + const std::string& profile_path): enabled_(enabled), min_save_period_ms_(min_save_period_ms), save_resolved_classes_delay_ms_(save_resolved_classes_delay_ms), @@ -55,7 +57,8 @@ struct ProfileSaverOptions { min_methods_to_save_(min_methods_to_save), min_classes_to_save_(min_classes_to_save), min_notification_before_wake_(min_notification_before_wake), - max_notification_before_wake_(max_notification_before_wake) {} + max_notification_before_wake_(max_notification_before_wake), + profile_path_(profile_path) {} bool IsEnabled() const { return enabled_; @@ -85,6 +88,9 @@ struct ProfileSaverOptions { uint32_t GetMaxNotificationBeforeWake() const { 
return max_notification_before_wake_; } + std::string GetProfilePath() const { + return profile_path_; + } friend std::ostream & operator<<(std::ostream &os, const ProfileSaverOptions& pso) { os << "enabled_" << pso.enabled_ @@ -106,6 +112,7 @@ struct ProfileSaverOptions { uint32_t min_classes_to_save_; uint32_t min_notification_before_wake_; uint32_t max_notification_before_wake_; + std::string profile_path_; }; } // namespace art diff --git a/runtime/jvalue.h b/runtime/jvalue.h index 398bfbc27a..f61a07c0c0 100644 --- a/runtime/jvalue.h +++ b/runtime/jvalue.h @@ -39,7 +39,9 @@ union PACKED(alignof(mirror::Object*)) JValue { } uint16_t GetC() const { return c; } - void SetC(uint16_t new_c) { c = new_c; } + void SetC(uint16_t new_c) { + j = static_cast<int64_t>(new_c); // Zero-extend to 64 bits. + } double GetD() const { return d; } void SetD(double new_d) { d = new_d; } @@ -66,7 +68,9 @@ union PACKED(alignof(mirror::Object*)) JValue { } uint8_t GetZ() const { return z; } - void SetZ(uint8_t new_z) { z = new_z; } + void SetZ(uint8_t new_z) { + j = static_cast<int64_t>(new_z); // Zero-extend to 64 bits. + } mirror::Object** GetGCRoot() { return &l; } diff --git a/runtime/method_info.h b/runtime/method_info.h new file mode 100644 index 0000000000..5a72125be4 --- /dev/null +++ b/runtime/method_info.h @@ -0,0 +1,77 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_METHOD_INFO_H_ +#define ART_RUNTIME_METHOD_INFO_H_ + +#include "base/logging.h" +#include "leb128.h" +#include "memory_region.h" + +namespace art { + +// Method info is for not dedupe friendly data of a method. Currently it only holds methods indices. +// Putting this data in MethodInfo instead of code infos saves ~5% oat size. +class MethodInfo { + using MethodIndexType = uint16_t; + + public: + // Reading mode + explicit MethodInfo(const uint8_t* ptr) { + if (ptr != nullptr) { + num_method_indices_ = DecodeUnsignedLeb128(&ptr); + region_ = MemoryRegion(const_cast<uint8_t*>(ptr), + num_method_indices_ * sizeof(MethodIndexType)); + } + } + + // Writing mode + MethodInfo(uint8_t* ptr, size_t num_method_indices) : num_method_indices_(num_method_indices) { + DCHECK(ptr != nullptr); + ptr = EncodeUnsignedLeb128(ptr, num_method_indices_); + region_ = MemoryRegion(ptr, num_method_indices_ * sizeof(MethodIndexType)); + } + + static size_t ComputeSize(size_t num_method_indices) { + uint8_t temp[8]; + uint8_t* ptr = temp; + ptr = EncodeUnsignedLeb128(ptr, num_method_indices); + return (ptr - temp) + num_method_indices * sizeof(MethodIndexType); + } + + ALWAYS_INLINE MethodIndexType GetMethodIndex(size_t index) const { + // Use bit functions to avoid pesky alignment requirements. 
+ return region_.LoadBits(index * BitSizeOf<MethodIndexType>(), BitSizeOf<MethodIndexType>()); + } + + void SetMethodIndex(size_t index, MethodIndexType method_index) { + region_.StoreBits(index * BitSizeOf<MethodIndexType>(), + method_index, + BitSizeOf<MethodIndexType>()); + } + + size_t NumMethodIndices() const { + return num_method_indices_; + } + + private: + size_t num_method_indices_ = 0u; + MemoryRegion region_; +}; + +} // namespace art + +#endif // ART_RUNTIME_METHOD_INFO_H_ diff --git a/runtime/mirror/array-inl.h b/runtime/mirror/array-inl.h index f56226bd98..04c80c5cc0 100644 --- a/runtime/mirror/array-inl.h +++ b/runtime/mirror/array-inl.h @@ -402,8 +402,8 @@ inline T PointerArray::GetElementPtrSize(uint32_t idx, PointerSize ptr_size) { return (T)static_cast<uintptr_t>( AsLongArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx)); } - return (T)static_cast<uintptr_t>( - AsIntArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx)); + return (T)static_cast<uintptr_t>(static_cast<uint32_t>( + AsIntArray<kVerifyFlags, kReadBarrierOption>()->GetWithoutChecks(idx))); } template<bool kTransactionActive, bool kUnchecked> diff --git a/runtime/mirror/class-inl.h b/runtime/mirror/class-inl.h index 2cff47e8b4..003b03b2f9 100644 --- a/runtime/mirror/class-inl.h +++ b/runtime/mirror/class-inl.h @@ -29,6 +29,7 @@ #include "dex_file.h" #include "gc/heap-inl.h" #include "iftable.h" +#include "class_ext-inl.h" #include "object_array-inl.h" #include "read_barrier-inl.h" #include "reference-inl.h" @@ -83,6 +84,12 @@ inline ClassLoader* Class::GetClassLoader() { } template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption> +inline ClassExt* Class::GetExtData() { + return GetFieldObject<ClassExt, kVerifyFlags, kReadBarrierOption>( + OFFSET_OF_OBJECT_MEMBER(Class, ext_data_)); +} + +template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption> inline DexCache* Class::GetDexCache() { return GetFieldObject<DexCache, 
kVerifyFlags, kReadBarrierOption>( OFFSET_OF_OBJECT_MEMBER(Class, dex_cache_)); @@ -841,7 +848,7 @@ inline void Class::AssertInitializedOrInitializingInThread(Thread* self) { } } -inline ObjectArray<Class>* Class::GetInterfaces() { +inline ObjectArray<Class>* Class::GetProxyInterfaces() { CHECK(IsProxyClass()); // First static field. auto* field = GetStaticField(0); @@ -850,7 +857,7 @@ inline ObjectArray<Class>* Class::GetInterfaces() { return GetFieldObject<ObjectArray<Class>>(field_offset); } -inline ObjectArray<ObjectArray<Class>>* Class::GetThrows() { +inline ObjectArray<ObjectArray<Class>>* Class::GetProxyThrows() { CHECK(IsProxyClass()); // Second static field. auto* field = GetStaticField(1); @@ -920,7 +927,7 @@ inline uint32_t Class::NumDirectInterfaces() { } else if (IsArrayClass()) { return 2; } else if (IsProxyClass()) { - ObjectArray<Class>* interfaces = GetInterfaces(); + ObjectArray<Class>* interfaces = GetProxyInterfaces(); return interfaces != nullptr ? interfaces->GetLength() : 0; } else { const DexFile::TypeList* interfaces = GetInterfaceTypeList(); @@ -951,6 +958,10 @@ void Class::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) { for (ArtMethod& method : GetMethods(pointer_size)) { method.VisitRoots<kReadBarrierOption>(visitor, pointer_size); } + ObjPtr<ClassExt> ext(GetExtData<kDefaultVerifyFlags, kReadBarrierOption>()); + if (!ext.IsNull()) { + ext->VisitNativeRoots<kReadBarrierOption, Visitor>(visitor, pointer_size); + } } inline IterationRange<StrideIterator<ArtMethod>> Class::GetDirectMethods(PointerSize pointer_size) { diff --git a/runtime/mirror/class.cc b/runtime/mirror/class.cc index eb2ec9b3c8..26af488bd2 100644 --- a/runtime/mirror/class.cc +++ b/runtime/mirror/class.cc @@ -64,10 +64,6 @@ void Class::VisitRoots(RootVisitor* visitor) { java_lang_Class_.VisitRootIfNonNull(visitor, RootInfo(kRootStickyClass)); } -ClassExt* Class::GetExtData() { - return GetFieldObject<ClassExt>(OFFSET_OF_OBJECT_MEMBER(Class, ext_data_)); -} - 
ClassExt* Class::EnsureExtDataPresent(Thread* self) { ObjPtr<ClassExt> existing(GetExtData()); if (!existing.IsNull()) { @@ -946,7 +942,7 @@ ObjPtr<Class> Class::GetDirectInterface(Thread* self, ObjPtr<Class> klass, uint3 DCHECK(interface != nullptr); return interface; } else if (klass->IsProxyClass()) { - ObjPtr<ObjectArray<Class>> interfaces = klass->GetInterfaces(); + ObjPtr<ObjectArray<Class>> interfaces = klass->GetProxyInterfaces(); DCHECK(interfaces != nullptr); return interfaces->Get(idx); } else { diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h index c52b66affe..27aecd5150 100644 --- a/runtime/mirror/class.h +++ b/runtime/mirror/class.h @@ -595,7 +595,7 @@ class MANAGED Class FINAL : public Object { // The size of java.lang.Class.class. static uint32_t ClassClassSize(PointerSize pointer_size) { // The number of vtable entries in java.lang.Class. - uint32_t vtable_entries = Object::kVTableLength + 70; + uint32_t vtable_entries = Object::kVTableLength + 67; return ComputeClassSize(true, vtable_entries, 0, 0, 4, 1, 0, pointer_size); } @@ -1162,6 +1162,8 @@ class MANAGED Class FINAL : public Object { void SetClinitThreadId(pid_t new_clinit_thread_id) REQUIRES_SHARED(Locks::mutator_lock_); + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, + ReadBarrierOption kReadBarrierOption = kWithReadBarrier> ClassExt* GetExtData() REQUIRES_SHARED(Locks::mutator_lock_); // Returns the ExtData for this class, allocating one if necessary. This should be the only way @@ -1262,10 +1264,10 @@ class MANAGED Class FINAL : public Object { REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(!Roles::uninterruptible_); // For proxy class only. - ObjectArray<Class>* GetInterfaces() REQUIRES_SHARED(Locks::mutator_lock_); + ObjectArray<Class>* GetProxyInterfaces() REQUIRES_SHARED(Locks::mutator_lock_); // For proxy class only. 
- ObjectArray<ObjectArray<Class>>* GetThrows() REQUIRES_SHARED(Locks::mutator_lock_); + ObjectArray<ObjectArray<Class>>* GetProxyThrows() REQUIRES_SHARED(Locks::mutator_lock_); // For reference class only. MemberOffset GetDisableIntrinsicFlagOffset() REQUIRES_SHARED(Locks::mutator_lock_); diff --git a/runtime/mirror/class_ext-inl.h b/runtime/mirror/class_ext-inl.h new file mode 100644 index 0000000000..feaac8580a --- /dev/null +++ b/runtime/mirror/class_ext-inl.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ART_RUNTIME_MIRROR_CLASS_EXT_INL_H_ +#define ART_RUNTIME_MIRROR_CLASS_EXT_INL_H_ + +#include "class_ext.h" + +#include "art_method-inl.h" + +namespace art { +namespace mirror { + +template<ReadBarrierOption kReadBarrierOption, class Visitor> +void ClassExt::VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) { + ObjPtr<PointerArray> arr(GetObsoleteMethods<kDefaultVerifyFlags, kReadBarrierOption>()); + if (arr.IsNull()) { + return; + } + int32_t len = arr->GetLength(); + for (int32_t i = 0; i < len; i++) { + ArtMethod* method = arr->GetElementPtrSize<ArtMethod*, + kDefaultVerifyFlags, + kReadBarrierOption>(i, pointer_size); + if (method != nullptr) { + method->VisitRoots<kReadBarrierOption>(visitor, pointer_size); + } + } +} + +} // namespace mirror +} // namespace art + +#endif // ART_RUNTIME_MIRROR_CLASS_EXT_INL_H_ diff --git a/runtime/mirror/class_ext.cc b/runtime/mirror/class_ext.cc index 7270079a8f..5dc3aca094 100644 --- a/runtime/mirror/class_ext.cc +++ b/runtime/mirror/class_ext.cc @@ -14,7 +14,7 @@ * limitations under the License. 
*/ -#include "class_ext.h" +#include "class_ext-inl.h" #include "art_method-inl.h" #include "base/casts.h" @@ -24,7 +24,6 @@ #include "gc/accounting/card_table-inl.h" #include "object-inl.h" #include "object_array.h" -#include "object_array-inl.h" #include "stack_trace_element.h" #include "utils.h" #include "well_known_classes.h" @@ -34,6 +33,11 @@ namespace mirror { GcRoot<Class> ClassExt::dalvik_system_ClassExt_; +uint32_t ClassExt::ClassSize(PointerSize pointer_size) { + uint32_t vtable_entries = Object::kVTableLength; + return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size); +} + void ClassExt::SetObsoleteArrays(ObjPtr<PointerArray> methods, ObjPtr<ObjectArray<DexCache>> dex_caches) { DCHECK_EQ(GetLockOwnerThreadId(), Thread::Current()->GetThreadId()) diff --git a/runtime/mirror/class_ext.h b/runtime/mirror/class_ext.h index ad8a61b676..fac955a45e 100644 --- a/runtime/mirror/class_ext.h +++ b/runtime/mirror/class_ext.h @@ -17,9 +17,8 @@ #ifndef ART_RUNTIME_MIRROR_CLASS_EXT_H_ #define ART_RUNTIME_MIRROR_CLASS_EXT_H_ -#include "class-inl.h" - #include "array.h" +#include "class.h" #include "dex_cache.h" #include "gc_root.h" #include "object.h" @@ -36,10 +35,7 @@ namespace mirror { // C++ mirror of dalvik.system.ClassExt class MANAGED ClassExt : public Object { public: - static uint32_t ClassSize(PointerSize pointer_size) { - uint32_t vtable_entries = Object::kVTableLength; - return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size); - } + static uint32_t ClassSize(PointerSize pointer_size); // Size of an instance of dalvik.system.ClassExt. 
static constexpr uint32_t InstanceSize() { @@ -57,8 +53,11 @@ class MANAGED ClassExt : public Object { OFFSET_OF_OBJECT_MEMBER(ClassExt, obsolete_dex_caches_)); } - PointerArray* GetObsoleteMethods() REQUIRES_SHARED(Locks::mutator_lock_) { - return GetFieldObject<PointerArray>(OFFSET_OF_OBJECT_MEMBER(ClassExt, obsolete_methods_)); + template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, + ReadBarrierOption kReadBarrierOption = kWithReadBarrier> + inline PointerArray* GetObsoleteMethods() REQUIRES_SHARED(Locks::mutator_lock_) { + return GetFieldObject<PointerArray, kVerifyFlags, kReadBarrierOption>( + OFFSET_OF_OBJECT_MEMBER(ClassExt, obsolete_methods_)); } ByteArray* GetOriginalDexFileBytes() REQUIRES_SHARED(Locks::mutator_lock_) { @@ -78,6 +77,10 @@ class MANAGED ClassExt : public Object { static void ResetClass(); static void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_); + template<ReadBarrierOption kReadBarrierOption = kWithReadBarrier, class Visitor> + inline void VisitNativeRoots(Visitor& visitor, PointerSize pointer_size) + REQUIRES_SHARED(Locks::mutator_lock_); + static ClassExt* Alloc(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_); private: diff --git a/runtime/mirror/dex_cache-inl.h b/runtime/mirror/dex_cache-inl.h index 582ecb23e5..5d3af5071a 100644 --- a/runtime/mirror/dex_cache-inl.h +++ b/runtime/mirror/dex_cache-inl.h @@ -47,7 +47,7 @@ inline void NativeDexCachePair<T>::Initialize(std::atomic<NativeDexCachePair<T>> } inline uint32_t DexCache::ClassSize(PointerSize pointer_size) { - uint32_t vtable_entries = Object::kVTableLength + 5; + const uint32_t vtable_entries = Object::kVTableLength; return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0, pointer_size); } diff --git a/runtime/mirror/dex_cache.h b/runtime/mirror/dex_cache.h index 35707ef4e9..48a9ecd992 100644 --- a/runtime/mirror/dex_cache.h +++ b/runtime/mirror/dex_cache.h @@ -212,10 +212,6 @@ class MANAGED DexCache FINAL : public Object { 
return GetFieldObject<String>(OFFSET_OF_OBJECT_MEMBER(DexCache, location_)); } - static MemberOffset DexOffset() { - return OFFSET_OF_OBJECT_MEMBER(DexCache, dex_); - } - static MemberOffset StringsOffset() { return OFFSET_OF_OBJECT_MEMBER(DexCache, strings_); } @@ -516,8 +512,11 @@ class MANAGED DexCache FINAL : public Object { static void AtomicStoreRelease16B(std::atomic<ConversionPair64>* target, ConversionPair64 value); #endif - HeapReference<Object> dex_; HeapReference<String> location_; + // Number of elements in the call_sites_ array. Note that this appears here + // because of our packing logic for 32 bit fields. + uint32_t num_resolved_call_sites_; + uint64_t dex_file_; // const DexFile* uint64_t resolved_call_sites_; // GcRoot<CallSite>* array with num_resolved_call_sites_ // elements. @@ -530,7 +529,6 @@ class MANAGED DexCache FINAL : public Object { uint64_t strings_; // std::atomic<StringDexCachePair>*, array with num_strings_ // elements. - uint32_t num_resolved_call_sites_; // Number of elements in the call_sites_ array. uint32_t num_resolved_fields_; // Number of elements in the resolved_fields_ array. uint32_t num_resolved_method_types_; // Number of elements in the resolved_method_types_ array. uint32_t num_resolved_methods_; // Number of elements in the resolved_methods_ array. diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h index 4541ce2a42..f7ab26de0d 100644 --- a/runtime/mirror/object.h +++ b/runtime/mirror/object.h @@ -538,10 +538,10 @@ class MANAGED LOCKABLE Object { PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_) { if (pointer_size == PointerSize::k32) { - intptr_t ptr = reinterpret_cast<intptr_t>(new_value); - DCHECK_EQ(static_cast<int32_t>(ptr), ptr); // Check that we dont lose any non 0 bits. + uintptr_t ptr = reinterpret_cast<uintptr_t>(new_value); + DCHECK_EQ(static_cast<uint32_t>(ptr), ptr); // Check that we dont lose any non 0 bits. 
SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>( - field_offset, static_cast<int32_t>(ptr)); + field_offset, static_cast<int32_t>(static_cast<uint32_t>(ptr))); } else { SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>( field_offset, reinterpret_cast64<int64_t>(new_value)); @@ -591,7 +591,8 @@ class MANAGED LOCKABLE Object { ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, PointerSize pointer_size) REQUIRES_SHARED(Locks::mutator_lock_) { if (pointer_size == PointerSize::k32) { - return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset)); + uint64_t address = static_cast<uint32_t>(GetField32<kVerifyFlags, kIsVolatile>(field_offset)); + return reinterpret_cast<T>(static_cast<uintptr_t>(address)); } else { int64_t v = GetField64<kVerifyFlags, kIsVolatile>(field_offset); return reinterpret_cast64<T>(v); diff --git a/runtime/mirror/string.cc b/runtime/mirror/string.cc index 884b88a6c1..de0e75b083 100644 --- a/runtime/mirror/string.cc +++ b/runtime/mirror/string.cc @@ -89,16 +89,17 @@ inline bool String::AllASCIIExcept(const uint16_t* chars, int32_t length, uint16 return true; } -ObjPtr<String> String::DoReplace(Thread* self, uint16_t old_c, uint16_t new_c) { - DCHECK(IsCompressed() ? ContainsElement(ArrayRef<uint8_t>(value_compressed_, GetLength()), old_c) - : ContainsElement(ArrayRef<uint16_t>(value_, GetLength()), old_c)); - int32_t length = GetLength(); +ObjPtr<String> String::DoReplace(Thread* self, Handle<String> src, uint16_t old_c, uint16_t new_c) { + int32_t length = src->GetLength(); + DCHECK(src->IsCompressed() + ? 
ContainsElement(ArrayRef<uint8_t>(src->value_compressed_, length), old_c) + : ContainsElement(ArrayRef<uint16_t>(src->value_, length), old_c)); bool compressible = kUseStringCompression && IsASCII(new_c) && - (IsCompressed() || (!IsASCII(old_c) && AllASCIIExcept(value_, length, old_c))); + (src->IsCompressed() || (!IsASCII(old_c) && AllASCIIExcept(src->value_, length, old_c))); gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator(); - const int32_t length_with_flag = String::GetFlaggedCount(GetLength(), compressible); + const int32_t length_with_flag = String::GetFlaggedCount(length, compressible); SetStringCountVisitor visitor(length_with_flag); ObjPtr<String> string = Alloc<true>(self, length_with_flag, allocator_type, visitor); if (UNLIKELY(string == nullptr)) { @@ -109,10 +110,10 @@ ObjPtr<String> String::DoReplace(Thread* self, uint16_t old_c, uint16_t new_c) { return dchecked_integral_cast<uint8_t>((old_c != c) ? c : new_c); }; uint8_t* out = string->value_compressed_; - if (LIKELY(IsCompressed())) { // LIKELY(compressible == IsCompressed()) - std::transform(value_compressed_, value_compressed_ + length, out, replace); + if (LIKELY(src->IsCompressed())) { // LIKELY(compressible == src->IsCompressed()) + std::transform(src->value_compressed_, src->value_compressed_ + length, out, replace); } else { - std::transform(value_, value_ + length, out, replace); + std::transform(src->value_, src->value_ + length, out, replace); } DCHECK(kUseStringCompression && AllASCII(out, length)); } else { @@ -120,10 +121,10 @@ ObjPtr<String> String::DoReplace(Thread* self, uint16_t old_c, uint16_t new_c) { return (old_c != c) ? 
c : new_c; }; uint16_t* out = string->value_; - if (UNLIKELY(IsCompressed())) { // LIKELY(compressible == IsCompressed()) - std::transform(value_compressed_, value_compressed_ + length, out, replace); + if (UNLIKELY(src->IsCompressed())) { // LIKELY(compressible == src->IsCompressed()) + std::transform(src->value_compressed_, src->value_compressed_ + length, out, replace); } else { - std::transform(value_, value_ + length, out, replace); + std::transform(src->value_, src->value_ + length, out, replace); } DCHECK(!kUseStringCompression || !AllASCII(out, length)); } diff --git a/runtime/mirror/string.h b/runtime/mirror/string.h index dbb5a4c387..b59bbfbd68 100644 --- a/runtime/mirror/string.h +++ b/runtime/mirror/string.h @@ -96,7 +96,7 @@ class MANAGED String FINAL : public Object { // Create a new string where all occurences of `old_c` are replaced with `new_c`. // String.doReplace(char, char) is called from String.replace(char, char) when there is a match. - ObjPtr<String> DoReplace(Thread* self, uint16_t old_c, uint16_t new_c) + static ObjPtr<String> DoReplace(Thread* self, Handle<String> src, uint16_t old_c, uint16_t new_c) REQUIRES_SHARED(Locks::mutator_lock_); ObjPtr<String> Intern() REQUIRES_SHARED(Locks::mutator_lock_); diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc index c8431c0519..381dc7beb0 100644 --- a/runtime/native/java_lang_Class.cc +++ b/runtime/native/java_lang_Class.cc @@ -108,10 +108,50 @@ static jstring Class_getNameNative(JNIEnv* env, jobject javaThis) { return soa.AddLocalReference<jstring>(mirror::Class::ComputeName(hs.NewHandle(c))); } -static jobjectArray Class_getProxyInterfaces(JNIEnv* env, jobject javaThis) { +// TODO: Move this to mirror::Class ? Other mirror types that commonly appear +// as arrays have a GetArrayClass() method. 
+static ObjPtr<mirror::Class> GetClassArrayClass(Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass(); + return Runtime::Current()->GetClassLinker()->FindArrayClass(self, &class_class); +} + +static jobjectArray Class_getInterfacesInternal(JNIEnv* env, jobject javaThis) { ScopedFastNativeObjectAccess soa(env); - ObjPtr<mirror::Class> c = DecodeClass(soa, javaThis); - return soa.AddLocalReference<jobjectArray>(c->GetInterfaces()->Clone(soa.Self())); + StackHandleScope<4> hs(soa.Self()); + Handle<mirror::Class> klass = hs.NewHandle(DecodeClass(soa, javaThis)); + + if (klass->IsProxyClass()) { + return soa.AddLocalReference<jobjectArray>(klass->GetProxyInterfaces()->Clone(soa.Self())); + } + + const DexFile::TypeList* iface_list = klass->GetInterfaceTypeList(); + if (iface_list == nullptr) { + return nullptr; + } + + const uint32_t num_ifaces = iface_list->Size(); + Handle<mirror::Class> class_array_class = hs.NewHandle(GetClassArrayClass(soa.Self())); + Handle<mirror::ObjectArray<mirror::Class>> ifaces = hs.NewHandle( + mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class.Get(), num_ifaces)); + if (ifaces.IsNull()) { + DCHECK(soa.Self()->IsExceptionPending()); + return nullptr; + } + + // Check that we aren't in an active transaction, we call SetWithoutChecks + // with kActiveTransaction == false. 
+ DCHECK(!Runtime::Current()->IsActiveTransaction()); + + MutableHandle<mirror::Class> interface(hs.NewHandle<mirror::Class>(nullptr)); + for (uint32_t i = 0; i < num_ifaces; ++i) { + const dex::TypeIndex type_idx = iface_list->GetTypeItem(i).type_idx_; + interface.Assign(ClassLinker::LookupResolvedType( + type_idx, klass->GetDexCache(), klass->GetClassLoader())); + ifaces->SetWithoutChecks<false>(i, interface.Get()); + } + + return soa.AddLocalReference<jobjectArray>(ifaces.Get()); } static mirror::ObjectArray<mirror::Field>* GetDeclaredFields( @@ -501,9 +541,7 @@ static jobjectArray Class_getDeclaredClasses(JNIEnv* env, jobject javaThis) { // Pending exception from GetDeclaredClasses. return nullptr; } - ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass(); - ObjPtr<mirror::Class> class_array_class = - Runtime::Current()->GetClassLinker()->FindArrayClass(soa.Self(), &class_class); + ObjPtr<mirror::Class> class_array_class = GetClassArrayClass(soa.Self()); if (class_array_class == nullptr) { return nullptr; } @@ -736,8 +774,8 @@ static JNINativeMethod gMethods[] = { FAST_NATIVE_METHOD(Class, getEnclosingMethodNative, "()Ljava/lang/reflect/Method;"), FAST_NATIVE_METHOD(Class, getInnerClassFlags, "(I)I"), FAST_NATIVE_METHOD(Class, getInnerClassName, "()Ljava/lang/String;"), + FAST_NATIVE_METHOD(Class, getInterfacesInternal, "()[Ljava/lang/Class;"), FAST_NATIVE_METHOD(Class, getNameNative, "()Ljava/lang/String;"), - FAST_NATIVE_METHOD(Class, getProxyInterfaces, "()[Ljava/lang/Class;"), FAST_NATIVE_METHOD(Class, getPublicDeclaredFields, "()[Ljava/lang/reflect/Field;"), FAST_NATIVE_METHOD(Class, getSignatureAnnotation, "()[Ljava/lang/String;"), FAST_NATIVE_METHOD(Class, isAnonymousClass, "()Z"), diff --git a/runtime/native/java_lang_DexCache.cc b/runtime/native/java_lang_DexCache.cc deleted file mode 100644 index 8fda4dfaaf..0000000000 --- a/runtime/native/java_lang_DexCache.cc +++ /dev/null @@ -1,109 +0,0 @@ -/* - * Copyright (C) 2008 The Android Open 
Source Project - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "java_lang_DexCache.h" - -#include "dex_file.h" -#include "dex_file_types.h" -#include "jni_internal.h" -#include "mirror/class-inl.h" -#include "mirror/dex_cache-inl.h" -#include "mirror/object-inl.h" -#include "scoped_fast_native_object_access-inl.h" -#include "well_known_classes.h" - -namespace art { - -static jobject DexCache_getDexNative(JNIEnv* env, jobject javaDexCache) { - ScopedFastNativeObjectAccess soa(env); - ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache); - // Should only be called while holding the lock on the dex cache. 
- DCHECK_EQ(dex_cache->GetLockOwnerThreadId(), soa.Self()->GetThreadId()); - const DexFile* dex_file = dex_cache->GetDexFile(); - if (dex_file == nullptr) { - return nullptr; - } - void* address = const_cast<void*>(reinterpret_cast<const void*>(dex_file->Begin())); - jobject byte_buffer = env->NewDirectByteBuffer(address, dex_file->Size()); - if (byte_buffer == nullptr) { - DCHECK(soa.Self()->IsExceptionPending()); - return nullptr; - } - - jvalue args[1]; - args[0].l = byte_buffer; - return env->CallStaticObjectMethodA(WellKnownClasses::com_android_dex_Dex, - WellKnownClasses::com_android_dex_Dex_create, - args); -} - -static jobject DexCache_getResolvedType(JNIEnv* env, jobject javaDexCache, jint type_index) { - ScopedFastNativeObjectAccess soa(env); - ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache); - CHECK_LT(static_cast<size_t>(type_index), dex_cache->GetDexFile()->NumTypeIds()); - return soa.AddLocalReference<jobject>(dex_cache->GetResolvedType(dex::TypeIndex(type_index))); -} - -static jobject DexCache_getResolvedString(JNIEnv* env, jobject javaDexCache, jint string_index) { - ScopedFastNativeObjectAccess soa(env); - ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache); - CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds()); - return soa.AddLocalReference<jobject>( - dex_cache->GetResolvedString(dex::StringIndex(string_index))); -} - -static void DexCache_setResolvedType(JNIEnv* env, - jobject javaDexCache, - jint type_index, - jobject type) { - ScopedFastNativeObjectAccess soa(env); - ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache); - const DexFile& dex_file = *dex_cache->GetDexFile(); - CHECK_LT(static_cast<size_t>(type_index), dex_file.NumTypeIds()); - ObjPtr<mirror::Class> t = soa.Decode<mirror::Class>(type); - if (t != nullptr && t->DescriptorEquals(dex_file.StringByTypeIdx(dex::TypeIndex(type_index)))) { - ClassTable* table = - 
Runtime::Current()->GetClassLinker()->FindClassTable(soa.Self(), dex_cache); - if (table != nullptr && table->TryInsert(t) == t) { - dex_cache->SetResolvedType(dex::TypeIndex(type_index), t); - } - } -} - -static void DexCache_setResolvedString(JNIEnv* env, jobject javaDexCache, jint string_index, - jobject string) { - ScopedFastNativeObjectAccess soa(env); - ObjPtr<mirror::DexCache> dex_cache = soa.Decode<mirror::DexCache>(javaDexCache); - CHECK_LT(static_cast<size_t>(string_index), dex_cache->GetDexFile()->NumStringIds()); - ObjPtr<mirror::String> s = soa.Decode<mirror::String>(string); - if (s != nullptr) { - dex_cache->SetResolvedString(dex::StringIndex(string_index), s); - } -} - -static JNINativeMethod gMethods[] = { - FAST_NATIVE_METHOD(DexCache, getDexNative, "()Lcom/android/dex/Dex;"), - FAST_NATIVE_METHOD(DexCache, getResolvedType, "(I)Ljava/lang/Class;"), - FAST_NATIVE_METHOD(DexCache, getResolvedString, "(I)Ljava/lang/String;"), - FAST_NATIVE_METHOD(DexCache, setResolvedType, "(ILjava/lang/Class;)V"), - FAST_NATIVE_METHOD(DexCache, setResolvedString, "(ILjava/lang/String;)V"), -}; - -void register_java_lang_DexCache(JNIEnv* env) { - REGISTER_NATIVE_METHODS("java/lang/DexCache"); -} - -} // namespace art diff --git a/runtime/native/java_lang_String.cc b/runtime/native/java_lang_String.cc index 2e561ffa46..bf33bf24a0 100644 --- a/runtime/native/java_lang_String.cc +++ b/runtime/native/java_lang_String.cc @@ -101,8 +101,9 @@ static jstring String_intern(JNIEnv* env, jobject java_this) { static jstring String_doReplace(JNIEnv* env, jobject java_this, jchar old_c, jchar new_c) { ScopedFastNativeObjectAccess soa(env); - ObjPtr<mirror::String> result = - soa.Decode<mirror::String>(java_this)->DoReplace(soa.Self(), old_c, new_c); + StackHandleScope<1> hs(soa.Self()); + Handle<mirror::String> string = hs.NewHandle(soa.Decode<mirror::String>(java_this)); + ObjPtr<mirror::String> result = mirror::String::DoReplace(soa.Self(), string, old_c, new_c); return 
soa.AddLocalReference<jstring>(result); } diff --git a/runtime/native/java_lang_reflect_Executable.cc b/runtime/native/java_lang_reflect_Executable.cc index bc23bedc77..8f226ce621 100644 --- a/runtime/native/java_lang_reflect_Executable.cc +++ b/runtime/native/java_lang_reflect_Executable.cc @@ -194,12 +194,146 @@ static jboolean Executable_isAnnotationPresentNative(JNIEnv* env, return annotations::IsMethodAnnotationPresent(method, klass); } +static jint Executable_compareMethodParametersInternal(JNIEnv* env, + jobject thisMethod, + jobject otherMethod) { + ScopedFastNativeObjectAccess soa(env); + ArtMethod* this_method = ArtMethod::FromReflectedMethod(soa, thisMethod); + ArtMethod* other_method = ArtMethod::FromReflectedMethod(soa, otherMethod); + + this_method = this_method->GetInterfaceMethodIfProxy(kRuntimePointerSize); + other_method = other_method->GetInterfaceMethodIfProxy(kRuntimePointerSize); + + const DexFile::TypeList* this_list = this_method->GetParameterTypeList(); + const DexFile::TypeList* other_list = other_method->GetParameterTypeList(); + + if (this_list == other_list) { + return 0; + } + + if (this_list == nullptr && other_list != nullptr) { + return -1; + } + + if (other_list == nullptr && this_list != nullptr) { + return 1; + } + + const int32_t this_size = this_list->Size(); + const int32_t other_size = other_list->Size(); + + if (this_size != other_size) { + return (this_size - other_size); + } + + for (int32_t i = 0; i < this_size; ++i) { + const DexFile::TypeId& lhs = this_method->GetDexFile()->GetTypeId( + this_list->GetTypeItem(i).type_idx_); + const DexFile::TypeId& rhs = other_method->GetDexFile()->GetTypeId( + other_list->GetTypeItem(i).type_idx_); + + uint32_t lhs_len, rhs_len; + const char* lhs_data = this_method->GetDexFile()->StringDataAndUtf16LengthByIdx( + lhs.descriptor_idx_, &lhs_len); + const char* rhs_data = other_method->GetDexFile()->StringDataAndUtf16LengthByIdx( + rhs.descriptor_idx_, &rhs_len); + + int cmp = 
strcmp(lhs_data, rhs_data); + if (cmp != 0) { + return (cmp < 0) ? -1 : 1; + } + } + + return 0; +} + +static jobject Executable_getMethodNameInternal(JNIEnv* env, jobject javaMethod) { + ScopedFastNativeObjectAccess soa(env); + ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); + method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize); + return soa.AddLocalReference<jobject>(method->GetNameAsString(soa.Self())); +} + +static jobject Executable_getMethodReturnTypeInternal(JNIEnv* env, jobject javaMethod) { + ScopedFastNativeObjectAccess soa(env); + ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); + method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize); + ObjPtr<mirror::Class> return_type(method->GetReturnType(true /* resolve */)); + if (return_type.IsNull()) { + CHECK(soa.Self()->IsExceptionPending()); + return nullptr; + } + + return soa.AddLocalReference<jobject>(return_type); +} + +// TODO: Move this to mirror::Class ? Other mirror types that commonly appear +// as arrays have a GetArrayClass() method. This is duplicated in +// java_lang_Class.cc as well. 
+static ObjPtr<mirror::Class> GetClassArrayClass(Thread* self) + REQUIRES_SHARED(Locks::mutator_lock_) { + ObjPtr<mirror::Class> class_class = mirror::Class::GetJavaLangClass(); + return Runtime::Current()->GetClassLinker()->FindArrayClass(self, &class_class); +} + +static jobjectArray Executable_getParameterTypesInternal(JNIEnv* env, jobject javaMethod) { + ScopedFastNativeObjectAccess soa(env); + ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); + method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize); + + const DexFile::TypeList* params = method->GetParameterTypeList(); + if (params == nullptr) { + return nullptr; + } + + const uint32_t num_params = params->Size(); + + StackHandleScope<3> hs(soa.Self()); + Handle<mirror::Class> class_array_class = hs.NewHandle(GetClassArrayClass(soa.Self())); + Handle<mirror::ObjectArray<mirror::Class>> ptypes = hs.NewHandle( + mirror::ObjectArray<mirror::Class>::Alloc(soa.Self(), class_array_class.Get(), num_params)); + if (ptypes.IsNull()) { + DCHECK(soa.Self()->IsExceptionPending()); + return nullptr; + } + + MutableHandle<mirror::Class> param(hs.NewHandle<mirror::Class>(nullptr)); + for (uint32_t i = 0; i < num_params; ++i) { + const dex::TypeIndex type_idx = params->GetTypeItem(i).type_idx_; + param.Assign(Runtime::Current()->GetClassLinker()->ResolveType(type_idx, method)); + if (param.Get() == nullptr) { + DCHECK(soa.Self()->IsExceptionPending()); + return nullptr; + } + ptypes->SetWithoutChecks<false>(i, param.Get()); + } + + return soa.AddLocalReference<jobjectArray>(ptypes.Get()); +} + +static jint Executable_getParameterCountInternal(JNIEnv* env, jobject javaMethod) { + ScopedFastNativeObjectAccess soa(env); + ArtMethod* method = ArtMethod::FromReflectedMethod(soa, javaMethod); + method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize); + + const DexFile::TypeList* params = method->GetParameterTypeList(); + return (params == nullptr) ? 
0 : params->Size(); +} + + static JNINativeMethod gMethods[] = { + FAST_NATIVE_METHOD(Executable, compareMethodParametersInternal, + "(Ljava/lang/reflect/Method;)I"), FAST_NATIVE_METHOD(Executable, getAnnotationNative, - "(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"), - FAST_NATIVE_METHOD(Executable, getDeclaredAnnotationsNative, "()[Ljava/lang/annotation/Annotation;"), + "(Ljava/lang/Class;)Ljava/lang/annotation/Annotation;"), + FAST_NATIVE_METHOD(Executable, getDeclaredAnnotationsNative, + "()[Ljava/lang/annotation/Annotation;"), FAST_NATIVE_METHOD(Executable, getParameterAnnotationsNative, - "()[[Ljava/lang/annotation/Annotation;"), + "()[[Ljava/lang/annotation/Annotation;"), + FAST_NATIVE_METHOD(Executable, getMethodNameInternal, "()Ljava/lang/String;"), + FAST_NATIVE_METHOD(Executable, getMethodReturnTypeInternal, "()Ljava/lang/Class;"), + FAST_NATIVE_METHOD(Executable, getParameterTypesInternal, "()[Ljava/lang/Class;"), + FAST_NATIVE_METHOD(Executable, getParameterCountInternal, "()I"), FAST_NATIVE_METHOD(Executable, getParameters0, "()[Ljava/lang/reflect/Parameter;"), FAST_NATIVE_METHOD(Executable, getSignatureAnnotation, "()[Ljava/lang/String;"), FAST_NATIVE_METHOD(Executable, isAnnotationPresentNative, "(Ljava/lang/Class;)Z"), diff --git a/runtime/native/java_lang_reflect_Field.cc b/runtime/native/java_lang_reflect_Field.cc index 9cf80a5bf5..9198964f87 100644 --- a/runtime/native/java_lang_reflect_Field.cc +++ b/runtime/native/java_lang_reflect_Field.cc @@ -456,6 +456,13 @@ static jlong Field_getArtField(JNIEnv* env, jobject javaField) { return reinterpret_cast<jlong>(field); } +static jobject Field_getNameInternal(JNIEnv* env, jobject javaField) { + ScopedFastNativeObjectAccess soa(env); + ArtField* field = soa.Decode<mirror::Field>(javaField)->GetArtField(); + return soa.AddLocalReference<jobject>( + field->GetStringName(soa.Self(), true /* resolve */)); +} + static jobjectArray Field_getDeclaredAnnotations(JNIEnv* env, jobject javaField) { 
ScopedFastNativeObjectAccess soa(env); ArtField* field = soa.Decode<mirror::Field>(javaField)->GetArtField(); @@ -506,6 +513,7 @@ static JNINativeMethod gMethods[] = { FAST_NATIVE_METHOD(Field, getFloat, "(Ljava/lang/Object;)F"), FAST_NATIVE_METHOD(Field, getInt, "(Ljava/lang/Object;)I"), FAST_NATIVE_METHOD(Field, getLong, "(Ljava/lang/Object;)J"), + FAST_NATIVE_METHOD(Field, getNameInternal, "()Ljava/lang/String;"), FAST_NATIVE_METHOD(Field, getShort, "(Ljava/lang/Object;)S"), FAST_NATIVE_METHOD(Field, isAnnotationPresentNative, "(Ljava/lang/Class;)Z"), FAST_NATIVE_METHOD(Field, set, "(Ljava/lang/Object;Ljava/lang/Object;)V"), diff --git a/runtime/native/java_lang_reflect_Method.cc b/runtime/native/java_lang_reflect_Method.cc index 6e5e3d9337..6f0130eb15 100644 --- a/runtime/native/java_lang_reflect_Method.cc +++ b/runtime/native/java_lang_reflect_Method.cc @@ -55,7 +55,8 @@ static jobjectArray Method_getExceptionTypes(JNIEnv* env, jobject javaMethod) { ++i; } CHECK_NE(throws_index, -1); - mirror::ObjectArray<mirror::Class>* declared_exceptions = klass->GetThrows()->Get(throws_index); + mirror::ObjectArray<mirror::Class>* declared_exceptions = + klass->GetProxyThrows()->Get(throws_index); return soa.AddLocalReference<jobjectArray>(declared_exceptions->Clone(soa.Self())); } else { mirror::ObjectArray<mirror::Class>* result_array = diff --git a/runtime/oat.h b/runtime/oat.h index df43107646..190d533eff 100644 --- a/runtime/oat.h +++ b/runtime/oat.h @@ -32,7 +32,7 @@ class InstructionSetFeatures; class PACKED(4) OatHeader { public: static constexpr uint8_t kOatMagic[] = { 'o', 'a', 't', '\n' }; - static constexpr uint8_t kOatVersion[] = { '1', '1', '5', '\0' }; // hash-based DexCache fields + static constexpr uint8_t kOatVersion[] = { '1', '1', '6', '\0' }; // Add method infos. 
static constexpr const char* kImageLocationKey = "image-location"; static constexpr const char* kDex2OatCmdLineKey = "dex2oat-cmdline"; diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc index 48bf1e72a4..3396ce0b57 100644 --- a/runtime/oat_file_assistant.cc +++ b/runtime/oat_file_assistant.cc @@ -725,58 +725,23 @@ const std::vector<uint32_t>* OatFileAssistant::GetRequiredDexChecksums() { return required_dex_checksums_found_ ? &cached_required_dex_checksums_ : nullptr; } -// TODO: Use something better than xor for the combined image checksum. std::unique_ptr<OatFileAssistant::ImageInfo> OatFileAssistant::ImageInfo::GetRuntimeImageInfo(InstructionSet isa, std::string* error_msg) { CHECK(error_msg != nullptr); - // Use the currently loaded image to determine the image locations for all - // the image spaces, regardless of the isa requested. Otherwise we would - // need to read from the boot image's oat file to determine the rest of the - // image locations in the case of multi-image. Runtime* runtime = Runtime::Current(); - std::vector<gc::space::ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces(); - if (image_spaces.empty()) { - *error_msg = "There are no boot image spaces"; + std::unique_ptr<ImageInfo> info(new ImageInfo()); + info->location = runtime->GetImageLocation(); + + std::unique_ptr<ImageHeader> image_header( + gc::space::ImageSpace::ReadImageHeader(info->location.c_str(), isa, error_msg)); + if (image_header == nullptr) { return nullptr; } - std::unique_ptr<ImageInfo> info(new ImageInfo()); - info->location = image_spaces[0]->GetImageLocation(); - - // TODO: Special casing on isa == kRuntimeISA is presumably motivated by - // performance: 'it's faster to use an already loaded image header than read - // the image header from disk'. But the loaded image is not necessarily the - // same as kRuntimeISA, so this behavior is suspect (b/35659889). 
- if (isa == kRuntimeISA) { - const ImageHeader& image_header = image_spaces[0]->GetImageHeader(); - info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header.GetOatDataBegin()); - info->patch_delta = image_header.GetPatchDelta(); - - info->oat_checksum = 0; - for (gc::space::ImageSpace* image_space : image_spaces) { - info->oat_checksum ^= image_space->GetImageHeader().GetOatChecksum(); - } - } else { - std::unique_ptr<ImageHeader> image_header( - gc::space::ImageSpace::ReadImageHeader(info->location.c_str(), isa, error_msg)); - if (image_header == nullptr) { - return nullptr; - } - info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin()); - info->patch_delta = image_header->GetPatchDelta(); - - info->oat_checksum = 0; - for (gc::space::ImageSpace* image_space : image_spaces) { - std::string location = image_space->GetImageLocation(); - image_header.reset( - gc::space::ImageSpace::ReadImageHeader(location.c_str(), isa, error_msg)); - if (image_header == nullptr) { - return nullptr; - } - info->oat_checksum ^= image_header->GetOatChecksum(); - } - } + info->oat_checksum = image_header->GetOatChecksum(); + info->oat_data_begin = reinterpret_cast<uintptr_t>(image_header->GetOatDataBegin()); + info->patch_delta = image_header->GetPatchDelta(); return info; } @@ -792,16 +757,6 @@ const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() { return cached_image_info_.get(); } -uint32_t OatFileAssistant::CalculateCombinedImageChecksum(InstructionSet isa) { - std::string error_msg; - std::unique_ptr<ImageInfo> info = ImageInfo::GetRuntimeImageInfo(isa, &error_msg); - if (info == nullptr) { - LOG(WARNING) << "Unable to get runtime image info for checksum: " << error_msg; - return 0; - } - return info->oat_checksum; -} - OatFileAssistant::OatFileInfo& OatFileAssistant::GetBestInfo() { bool use_oat = oat_.IsUseable() || odex_.Status() == kOatCannotOpen; return use_oat ? 
oat_ : odex_; diff --git a/runtime/oat_file_assistant.h b/runtime/oat_file_assistant.h index eec87f0768..d61e9949b6 100644 --- a/runtime/oat_file_assistant.h +++ b/runtime/oat_file_assistant.h @@ -276,8 +276,6 @@ class OatFileAssistant { std::string* oat_filename, std::string* error_msg); - static uint32_t CalculateCombinedImageChecksum(InstructionSet isa = kRuntimeISA); - private: struct ImageInfo { uint32_t oat_checksum = 0; diff --git a/runtime/oat_quick_method_header.cc b/runtime/oat_quick_method_header.cc index b4e4285dc7..8eef5867e2 100644 --- a/runtime/oat_quick_method_header.cc +++ b/runtime/oat_quick_method_header.cc @@ -22,13 +22,14 @@ namespace art { -OatQuickMethodHeader::OatQuickMethodHeader( - uint32_t vmap_table_offset, - uint32_t frame_size_in_bytes, - uint32_t core_spill_mask, - uint32_t fp_spill_mask, - uint32_t code_size) +OatQuickMethodHeader::OatQuickMethodHeader(uint32_t vmap_table_offset, + uint32_t method_info_offset, + uint32_t frame_size_in_bytes, + uint32_t core_spill_mask, + uint32_t fp_spill_mask, + uint32_t code_size) : vmap_table_offset_(vmap_table_offset), + method_info_offset_(method_info_offset), frame_info_(frame_size_in_bytes, core_spill_mask, fp_spill_mask), code_size_(code_size) {} diff --git a/runtime/oat_quick_method_header.h b/runtime/oat_quick_method_header.h index 3cdde5a065..f2a2af2a5f 100644 --- a/runtime/oat_quick_method_header.h +++ b/runtime/oat_quick_method_header.h @@ -20,6 +20,7 @@ #include "arch/instruction_set.h" #include "base/macros.h" #include "quick/quick_method_frame_info.h" +#include "method_info.h" #include "stack_map.h" #include "utils.h" @@ -30,11 +31,13 @@ class ArtMethod; // OatQuickMethodHeader precedes the raw code chunk generated by the compiler. 
class PACKED(4) OatQuickMethodHeader { public: - explicit OatQuickMethodHeader(uint32_t vmap_table_offset = 0U, - uint32_t frame_size_in_bytes = 0U, - uint32_t core_spill_mask = 0U, - uint32_t fp_spill_mask = 0U, - uint32_t code_size = 0U); + OatQuickMethodHeader() = default; + explicit OatQuickMethodHeader(uint32_t vmap_table_offset, + uint32_t method_info_offset, + uint32_t frame_size_in_bytes, + uint32_t core_spill_mask, + uint32_t fp_spill_mask, + uint32_t code_size); ~OatQuickMethodHeader(); @@ -63,8 +66,7 @@ class PACKED(4) OatQuickMethodHeader { const void* GetOptimizedCodeInfoPtr() const { DCHECK(IsOptimized()); - const void* data = reinterpret_cast<const void*>(code_ - vmap_table_offset_); - return data; + return reinterpret_cast<const void*>(code_ - vmap_table_offset_); } uint8_t* GetOptimizedCodeInfoPtr() { @@ -76,6 +78,20 @@ class PACKED(4) OatQuickMethodHeader { return CodeInfo(GetOptimizedCodeInfoPtr()); } + const void* GetOptimizedMethodInfoPtr() const { + DCHECK(IsOptimized()); + return reinterpret_cast<const void*>(code_ - method_info_offset_); + } + + uint8_t* GetOptimizedMethodInfoPtr() { + DCHECK(IsOptimized()); + return code_ - method_info_offset_; + } + + MethodInfo GetOptimizedMethodInfo() const { + return MethodInfo(reinterpret_cast<const uint8_t*>(GetOptimizedMethodInfoPtr())); + } + const uint8_t* GetCode() const { return code_; } @@ -100,6 +116,18 @@ class PACKED(4) OatQuickMethodHeader { return &vmap_table_offset_; } + uint32_t GetMethodInfoOffset() const { + return method_info_offset_; + } + + void SetMethodInfoOffset(uint32_t offset) { + method_info_offset_ = offset; + } + + const uint32_t* GetMethodInfoOffsetAddr() const { + return &method_info_offset_; + } + const uint8_t* GetVmapTable() const { CHECK(!IsOptimized()) << "Unimplemented vmap table for optimizing compiler"; return (vmap_table_offset_ == 0) ? 
nullptr : code_ - vmap_table_offset_; @@ -160,12 +188,17 @@ class PACKED(4) OatQuickMethodHeader { static constexpr uint32_t kCodeSizeMask = ~kShouldDeoptimizeMask; // The offset in bytes from the start of the vmap table to the end of the header. - uint32_t vmap_table_offset_; + uint32_t vmap_table_offset_ = 0u; + // The offset in bytes from the start of the method info to the end of the header. + // The method info offset is not in the CodeInfo since CodeInfo has good dedupe properties that + // would be lost from doing so. The method info memory region contains method indices since they + // are hard to dedupe. + uint32_t method_info_offset_ = 0u; // The stack frame information. QuickMethodFrameInfo frame_info_; // The code size in bytes. The highest bit is used to signify if the compiled // code with the method header has should_deoptimize flag. - uint32_t code_size_; + uint32_t code_size_ = 0u; // The actual code. uint8_t code_[0]; }; diff --git a/runtime/openjdkjvmti/Android.bp b/runtime/openjdkjvmti/Android.bp index c01e3f4152..dd49ad0cfb 100644 --- a/runtime/openjdkjvmti/Android.bp +++ b/runtime/openjdkjvmti/Android.bp @@ -13,6 +13,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
+cc_library_headers { + name: "libopenjdkjvmti_headers", + host_supported: true, + export_include_dirs: ["include"], +} + cc_defaults { name: "libopenjdkjvmti_defaults", defaults: ["art_defaults"], @@ -40,6 +46,7 @@ cc_defaults { "ti_timers.cc", "transform.cc"], include_dirs: ["art/runtime"], + header_libs: ["libopenjdkjvmti_headers"], shared_libs: [ "libbase", "libnativehelper", diff --git a/runtime/openjdkjvmti/OpenjdkJvmTi.cc b/runtime/openjdkjvmti/OpenjdkJvmTi.cc index 5e0d4bdd07..5401e5cdf8 100644 --- a/runtime/openjdkjvmti/OpenjdkJvmTi.cc +++ b/runtime/openjdkjvmti/OpenjdkJvmTi.cc @@ -35,7 +35,7 @@ #include <jni.h> -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "art_jvmti.h" #include "base/logging.h" diff --git a/runtime/openjdkjvmti/jvmti.h b/runtime/openjdkjvmti/include/jvmti.h index de07c163fc..de07c163fc 100644 --- a/runtime/openjdkjvmti/jvmti.h +++ b/runtime/openjdkjvmti/include/jvmti.h diff --git a/runtime/openjdkjvmti/ti_class.cc b/runtime/openjdkjvmti/ti_class.cc index 4282e38b17..2d1b25ed26 100644 --- a/runtime/openjdkjvmti/ti_class.cc +++ b/runtime/openjdkjvmti/ti_class.cc @@ -62,6 +62,7 @@ #include "thread-inl.h" #include "thread_list.h" #include "ti_class_loader.h" +#include "ti_phase.h" #include "ti_redefine.h" #include "utils.h" @@ -142,6 +143,18 @@ struct ClassCallback : public art::ClassLoadCallback { // It is a primitive or array. Just return return; } + jvmtiPhase phase = PhaseUtil::GetPhaseUnchecked(); + if (UNLIKELY(phase != JVMTI_PHASE_START && phase != JVMTI_PHASE_LIVE)) { + // We want to wait until we are at least in the START phase so that all WellKnownClasses and + // mirror classes have been initialized and loaded. The runtime relies on these classes having + // specific fields and methods present. Since PreDefine hooks don't need to abide by this + // restriction we will simply not send the event for these classes. 
+ LOG(WARNING) << "Ignoring load of class <" << descriptor << "> as it is being loaded during " + << "runtime initialization."; + return; + } + + // Strip the 'L' and ';' from the descriptor std::string name(std::string(descriptor).substr(1, strlen(descriptor) - 2)); art::Thread* self = art::Thread::Current(); diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc index 9113f83cd4..4d787db5ac 100644 --- a/runtime/parsed_options.cc +++ b/runtime/parsed_options.cc @@ -708,6 +708,7 @@ void ParsedOptions::Usage(const char* fmt, ...) { UsageMessage(stream, " -Xps-min-classes-to-save:integervalue\n"); UsageMessage(stream, " -Xps-min-notification-before-wake:integervalue\n"); UsageMessage(stream, " -Xps-max-notification-before-wake:integervalue\n"); + UsageMessage(stream, " -Xps-profile-path:file-path\n"); UsageMessage(stream, " -Xcompiler:filename\n"); UsageMessage(stream, " -Xcompiler-option dex2oat-option\n"); UsageMessage(stream, " -Ximage-compiler-option dex2oat-option\n"); diff --git a/runtime/reflection.cc b/runtime/reflection.cc index 3c64d40720..87bc7df214 100644 --- a/runtime/reflection.cc +++ b/runtime/reflection.cc @@ -671,14 +671,14 @@ jobject InvokeMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject javaM soa.Self()->ClearException(); jclass exception_class = soa.Env()->FindClass("java/lang/reflect/InvocationTargetException"); if (exception_class == nullptr) { - soa.Self()->AssertPendingOOMException(); + soa.Self()->AssertPendingException(); return nullptr; } jmethodID mid = soa.Env()->GetMethodID(exception_class, "<init>", "(Ljava/lang/Throwable;)V"); CHECK(mid != nullptr); jobject exception_instance = soa.Env()->NewObject(exception_class, mid, th); if (exception_instance == nullptr) { - soa.Self()->AssertPendingOOMException(); + soa.Self()->AssertPendingException(); return nullptr; } soa.Env()->Throw(reinterpret_cast<jthrowable>(exception_instance)); diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 9fd2c88c3c..44f8281abf 
100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -106,7 +106,6 @@ #include "native/dalvik_system_VMStack.h" #include "native/dalvik_system_ZygoteHooks.h" #include "native/java_lang_Class.h" -#include "native/java_lang_DexCache.h" #include "native/java_lang_Object.h" #include "native/java_lang_String.h" #include "native/java_lang_StringFactory.h" @@ -286,6 +285,13 @@ Runtime::~Runtime() { LOG(WARNING) << "Current thread not detached in Runtime shutdown"; } + if (jit_ != nullptr) { + // Stop the profile saver thread before marking the runtime as shutting down. + // The saver will try to dump the profiles before being sopped and that + // requires holding the mutator lock. + jit_->StopProfileSaver(); + } + { ScopedTrace trace2("Wait for shutdown cond"); MutexLock mu(self, *Locks::runtime_shutdown_lock_); @@ -327,8 +333,6 @@ Runtime::~Runtime() { // Delete thread pool before the thread list since we don't want to wait forever on the // JIT compiler threads. jit_->DeleteThreadPool(); - // Similarly, stop the profile saver thread before deleting the thread list. - jit_->StopProfileSaver(); } // TODO Maybe do some locking. @@ -1539,7 +1543,6 @@ void Runtime::RegisterRuntimeNativeMethods(JNIEnv* env) { register_dalvik_system_VMStack(env); register_dalvik_system_ZygoteHooks(env); register_java_lang_Class(env); - register_java_lang_DexCache(env); register_java_lang_Object(env); register_java_lang_invoke_MethodHandleImpl(env); register_java_lang_ref_FinalizerReference(env); @@ -2152,6 +2155,19 @@ void Runtime::CreateJit() { jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg)); if (jit_.get() == nullptr) { LOG(WARNING) << "Failed to create JIT " << error_msg; + return; + } + + // In case we have a profile path passed as a command line argument, + // register the current class path for profiling now. Note that we cannot do + // this before we create the JIT and having it here is the most convenient way. 
+ // This is used when testing profiles with dalvikvm command as there is no + // framework to register the dex files for profiling. + if (jit_options_->GetSaveProfilingInfo() && + !jit_options_->GetProfileSaverOptions().GetProfilePath().empty()) { + std::vector<std::string> dex_filenames; + Split(class_path_string_, ':', &dex_filenames); + RegisterAppInfo(dex_filenames, jit_options_->GetProfileSaverOptions().GetProfilePath()); } } diff --git a/runtime/stack.cc b/runtime/stack.cc index 51a24e4e01..0628643a09 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -142,8 +142,10 @@ ArtMethod* StackVisitor::GetMethod() const { InlineInfo inline_info = GetCurrentInlineInfo(); const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding(); + MethodInfo method_info = method_header->GetOptimizedMethodInfo(); DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames); return GetResolvedMethod(*GetCurrentQuickFrame(), + method_info, inline_info, encoding.inline_info.encoding, depth_in_stack_map); diff --git a/runtime/stack.h b/runtime/stack.h index 90a0aee353..5c9614aba4 100644 --- a/runtime/stack.h +++ b/runtime/stack.h @@ -197,6 +197,11 @@ class ShadowFrame { return *reinterpret_cast<const int32_t*>(vreg); } + // Shorts are extended to Ints in VRegs. Interpreter intrinsics needs them as shorts. 
+ int16_t GetVRegShort(size_t i) const { + return static_cast<int16_t>(GetVReg(i)); + } + uint32_t* GetVRegAddr(size_t i) { return &vregs_[i]; } diff --git a/runtime/stack_map.cc b/runtime/stack_map.cc index d657311ae9..250ff2af1a 100644 --- a/runtime/stack_map.cc +++ b/runtime/stack_map.cc @@ -118,7 +118,8 @@ void CodeInfo::Dump(VariableIndentationOutputStream* vios, uint32_t code_offset, uint16_t number_of_dex_registers, bool dump_stack_maps, - InstructionSet instruction_set) const { + InstructionSet instruction_set, + const MethodInfo& method_info) const { CodeInfoEncoding encoding = ExtractEncoding(); size_t number_of_stack_maps = GetNumberOfStackMaps(encoding); vios->Stream() @@ -139,6 +140,7 @@ void CodeInfo::Dump(VariableIndentationOutputStream* vios, stack_map.Dump(vios, *this, encoding, + method_info, code_offset, number_of_dex_registers, instruction_set, @@ -189,6 +191,7 @@ void DexRegisterMap::Dump(VariableIndentationOutputStream* vios, void StackMap::Dump(VariableIndentationOutputStream* vios, const CodeInfo& code_info, const CodeInfoEncoding& encoding, + const MethodInfo& method_info, uint32_t code_offset, uint16_t number_of_dex_registers, InstructionSet instruction_set, @@ -222,12 +225,13 @@ void StackMap::Dump(VariableIndentationOutputStream* vios, // We do not know the length of the dex register maps of inlined frames // at this level, so we just pass null to `InlineInfo::Dump` to tell // it not to look at these maps. 
- inline_info.Dump(vios, code_info, nullptr); + inline_info.Dump(vios, code_info, method_info, nullptr); } } void InlineInfo::Dump(VariableIndentationOutputStream* vios, const CodeInfo& code_info, + const MethodInfo& method_info, uint16_t number_of_dex_registers[]) const { InlineInfoEncoding inline_info_encoding = code_info.ExtractEncoding().inline_info.encoding; vios->Stream() << "InlineInfo with depth " @@ -245,7 +249,7 @@ void InlineInfo::Dump(VariableIndentationOutputStream* vios, } else { vios->Stream() << std::dec - << ", method_index=" << GetMethodIndexAtDepth(inline_info_encoding, i); + << ", method_index=" << GetMethodIndexAtDepth(inline_info_encoding, method_info, i); } vios->Stream() << ")\n"; if (HasDexRegisterMapAtDepth(inline_info_encoding, i) && (number_of_dex_registers != nullptr)) { diff --git a/runtime/stack_map.h b/runtime/stack_map.h index d936ce938e..ffa17c9543 100644 --- a/runtime/stack_map.h +++ b/runtime/stack_map.h @@ -23,6 +23,7 @@ #include "bit_memory_region.h" #include "dex_file.h" #include "memory_region.h" +#include "method_info.h" #include "leb128.h" namespace art { @@ -367,7 +368,8 @@ class DexRegisterLocationCatalog { return region_.size(); } - void Dump(VariableIndentationOutputStream* vios, const CodeInfo& code_info); + void Dump(VariableIndentationOutputStream* vios, + const CodeInfo& code_info); // Special (invalid) Dex register location catalog entry index meaning // that there is no location for a given Dex register (i.e., it is @@ -862,6 +864,7 @@ class StackMap { void Dump(VariableIndentationOutputStream* vios, const CodeInfo& code_info, const CodeInfoEncoding& encoding, + const MethodInfo& method_info, uint32_t code_offset, uint16_t number_of_dex_registers, InstructionSet instruction_set, @@ -885,12 +888,12 @@ class StackMap { class InlineInfoEncoding { public: - void SetFromSizes(size_t method_index_max, + void SetFromSizes(size_t method_index_idx_max, size_t dex_pc_max, size_t extra_data_max, size_t dex_register_map_size) 
{ total_bit_size_ = kMethodIndexBitOffset; - total_bit_size_ += MinimumBitsToStore(method_index_max); + total_bit_size_ += MinimumBitsToStore(method_index_idx_max); dex_pc_bit_offset_ = dchecked_integral_cast<uint8_t>(total_bit_size_); // Note: We're not encoding the dex pc if there is none. That's the case @@ -908,7 +911,7 @@ class InlineInfoEncoding { total_bit_size_ += MinimumBitsToStore(dex_register_map_size); } - ALWAYS_INLINE FieldEncoding GetMethodIndexEncoding() const { + ALWAYS_INLINE FieldEncoding GetMethodIndexIdxEncoding() const { return FieldEncoding(kMethodIndexBitOffset, dex_pc_bit_offset_); } ALWAYS_INLINE FieldEncoding GetDexPcEncoding() const { @@ -975,16 +978,23 @@ class InlineInfo { } } - ALWAYS_INLINE uint32_t GetMethodIndexAtDepth(const InlineInfoEncoding& encoding, - uint32_t depth) const { + ALWAYS_INLINE uint32_t GetMethodIndexIdxAtDepth(const InlineInfoEncoding& encoding, + uint32_t depth) const { DCHECK(!EncodesArtMethodAtDepth(encoding, depth)); - return encoding.GetMethodIndexEncoding().Load(GetRegionAtDepth(encoding, depth)); + return encoding.GetMethodIndexIdxEncoding().Load(GetRegionAtDepth(encoding, depth)); } - ALWAYS_INLINE void SetMethodIndexAtDepth(const InlineInfoEncoding& encoding, - uint32_t depth, - uint32_t index) { - encoding.GetMethodIndexEncoding().Store(GetRegionAtDepth(encoding, depth), index); + ALWAYS_INLINE void SetMethodIndexIdxAtDepth(const InlineInfoEncoding& encoding, + uint32_t depth, + uint32_t index) { + encoding.GetMethodIndexIdxEncoding().Store(GetRegionAtDepth(encoding, depth), index); + } + + + ALWAYS_INLINE uint32_t GetMethodIndexAtDepth(const InlineInfoEncoding& encoding, + const MethodInfo& method_info, + uint32_t depth) const { + return method_info.GetMethodIndex(GetMethodIndexIdxAtDepth(encoding, depth)); } ALWAYS_INLINE uint32_t GetDexPcAtDepth(const InlineInfoEncoding& encoding, @@ -1012,7 +1022,8 @@ class InlineInfo { ALWAYS_INLINE ArtMethod* GetArtMethodAtDepth(const InlineInfoEncoding& encoding, 
uint32_t depth) const { uint32_t low_bits = encoding.GetExtraDataEncoding().Load(GetRegionAtDepth(encoding, depth)); - uint32_t high_bits = encoding.GetMethodIndexEncoding().Load(GetRegionAtDepth(encoding, depth)); + uint32_t high_bits = encoding.GetMethodIndexIdxEncoding().Load( + GetRegionAtDepth(encoding, depth)); if (high_bits == 0) { return reinterpret_cast<ArtMethod*>(low_bits); } else { @@ -1040,6 +1051,7 @@ class InlineInfo { void Dump(VariableIndentationOutputStream* vios, const CodeInfo& info, + const MethodInfo& method_info, uint16_t* number_of_dex_registers) const; private: @@ -1219,12 +1231,18 @@ class InvokeInfo { encoding.GetInvokeTypeEncoding().Store(region_, invoke_type); } - ALWAYS_INLINE uint32_t GetMethodIndex(const InvokeInfoEncoding& encoding) const { + ALWAYS_INLINE uint32_t GetMethodIndexIdx(const InvokeInfoEncoding& encoding) const { return encoding.GetMethodIndexEncoding().Load(region_); } - ALWAYS_INLINE void SetMethodIndex(const InvokeInfoEncoding& encoding, uint32_t method_index) { - encoding.GetMethodIndexEncoding().Store(region_, method_index); + ALWAYS_INLINE void SetMethodIndexIdx(const InvokeInfoEncoding& encoding, + uint32_t method_index_idx) { + encoding.GetMethodIndexEncoding().Store(region_, method_index_idx); + } + + ALWAYS_INLINE uint32_t GetMethodIndex(const InvokeInfoEncoding& encoding, + MethodInfo method_info) const { + return method_info.GetMethodIndex(GetMethodIndexIdx(encoding)); } bool IsValid() const { return region_.pointer() != nullptr; } @@ -1542,7 +1560,8 @@ class CodeInfo { uint32_t code_offset, uint16_t number_of_dex_registers, bool dump_stack_maps, - InstructionSet instruction_set) const; + InstructionSet instruction_set, + const MethodInfo& method_info) const; // Check that the code info has valid stack map and abort if it does not. 
void AssertValidStackMap(const CodeInfoEncoding& encoding) const { diff --git a/runtime/thread.cc b/runtime/thread.cc index 30a4046d73..008c388229 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -16,6 +16,10 @@ #include "thread.h" +#if !defined(__APPLE__) +#include <sched.h> +#endif + #include <pthread.h> #include <signal.h> #include <sys/resource.h> @@ -1591,8 +1595,21 @@ void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) { if (thread != nullptr) { int policy; sched_param sp; +#if !defined(__APPLE__) + // b/36445592 Don't use pthread_getschedparam since pthread may have exited. + policy = sched_getscheduler(tid); + if (policy == -1) { + PLOG(WARNING) << "sched_getscheduler(" << tid << ")"; + } + int sched_getparam_result = sched_getparam(tid, &sp); + if (sched_getparam_result == -1) { + PLOG(WARNING) << "sched_getparam(" << tid << ", &sp)"; + sp.sched_priority = -1; + } +#else CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp), __FUNCTION__); +#endif os << " sched=" << policy << "/" << sp.sched_priority << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self); } diff --git a/runtime/utils.h b/runtime/utils.h index 96e5bfa8ec..24fd2053f5 100644 --- a/runtime/utils.h +++ b/runtime/utils.h @@ -325,6 +325,18 @@ constexpr size_t ArrayCount(const T (&)[size]) { return size; } +// Return -1 if <, 0 if ==, 1 if >. +template <typename T> +inline static int32_t Compare(T lhs, T rhs) { + return (lhs < rhs) ? -1 : ((lhs == rhs) ? 0 : 1); +} + +// Return -1 if < 0, 0 if == 0, 1 if > 0. +template <typename T> +inline static int32_t Signum(T opnd) { + return (opnd < 0) ? -1 : ((opnd == 0) ? 
0 : 1); +} + } // namespace art #endif // ART_RUNTIME_UTILS_H_ diff --git a/runtime/vdex_file.h b/runtime/vdex_file.h index 5048bad121..898d07d22c 100644 --- a/runtime/vdex_file.h +++ b/runtime/vdex_file.h @@ -61,7 +61,7 @@ class VdexFile { private: static constexpr uint8_t kVdexMagic[] = { 'v', 'd', 'e', 'x' }; - static constexpr uint8_t kVdexVersion[] = { '0', '0', '4', '\0' }; // dexlayout incompatibility + static constexpr uint8_t kVdexVersion[] = { '0', '0', '5', '\0' }; // access flags uint8_t magic_[4]; uint8_t version_[4]; diff --git a/runtime/verifier/reg_type.cc b/runtime/verifier/reg_type.cc index 52f7e348ce..740b7dd7d4 100644 --- a/runtime/verifier/reg_type.cc +++ b/runtime/verifier/reg_type.cc @@ -309,6 +309,7 @@ PreciseReferenceType::PreciseReferenceType(mirror::Class* klass, const StringPie // Note: no check for IsInstantiable() here. We may produce this in case an InstantiationError // would be thrown at runtime, but we need to continue verification and *not* create a // hard failure or abort. 
+ CheckConstructorInvariants(this); } std::string UnresolvedMergedType::Dump() const { @@ -789,7 +790,7 @@ void RegType::CheckInvariants() const { if (!klass_.IsNull()) { CHECK(!descriptor_.empty()) << *this; std::string temp; - CHECK_EQ(descriptor_.ToString(), klass_.Read()->GetDescriptor(&temp)) << *this; + CHECK_EQ(descriptor_, klass_.Read()->GetDescriptor(&temp)) << *this; } } @@ -820,9 +821,7 @@ UnresolvedMergedType::UnresolvedMergedType(const RegType& resolved, reg_type_cache_(reg_type_cache), resolved_part_(resolved), unresolved_types_(unresolved, false, unresolved.GetAllocator()) { - if (kIsDebugBuild) { - CheckInvariants(); - } + CheckConstructorInvariants(this); } void UnresolvedMergedType::CheckInvariants() const { CHECK(reg_type_cache_ != nullptr); diff --git a/runtime/verifier/reg_type.h b/runtime/verifier/reg_type.h index 472381dd9b..dedf77f7db 100644 --- a/runtime/verifier/reg_type.h +++ b/runtime/verifier/reg_type.h @@ -274,14 +274,17 @@ class RegType { uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : descriptor_(descriptor), klass_(klass), - cache_id_(cache_id) { + cache_id_(cache_id) {} + + template <typename Class> + void CheckConstructorInvariants(Class* this_ ATTRIBUTE_UNUSED) const + REQUIRES_SHARED(Locks::mutator_lock_) { + static_assert(std::is_final<Class>::value, "Class must be final."); if (kIsDebugBuild) { CheckInvariants(); } } - void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_); - const StringPiece descriptor_; mutable GcRoot<mirror::Class> klass_; // Non-const only due to moving classes. const uint16_t cache_id_; @@ -289,6 +292,8 @@ class RegType { friend class RegTypeCache; private: + virtual void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_); + /* * A basic Join operation on classes. For a pair of types S and T the Join, written S v T = J, is * S <: J, T <: J and for-all U such that S <: U, T <: U then J <: U. 
That is J is the parent of @@ -339,7 +344,9 @@ class ConflictType FINAL : public RegType { private: ConflictType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : RegType(klass, descriptor, cache_id) {} + : RegType(klass, descriptor, cache_id) { + CheckConstructorInvariants(this); + } static const ConflictType* instance_; }; @@ -368,7 +375,9 @@ class UndefinedType FINAL : public RegType { private: UndefinedType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : RegType(klass, descriptor, cache_id) {} + : RegType(klass, descriptor, cache_id) { + CheckConstructorInvariants(this); + } static const UndefinedType* instance_; }; @@ -387,7 +396,7 @@ class Cat1Type : public PrimitiveType { uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_); }; -class IntegerType : public Cat1Type { +class IntegerType FINAL : public Cat1Type { public: bool IsInteger() const OVERRIDE { return true; } std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); @@ -401,7 +410,9 @@ class IntegerType : public Cat1Type { private: IntegerType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : Cat1Type(klass, descriptor, cache_id) {} + : Cat1Type(klass, descriptor, cache_id) { + CheckConstructorInvariants(this); + } static const IntegerType* instance_; }; @@ -419,7 +430,9 @@ class BooleanType FINAL : public Cat1Type { private: BooleanType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : Cat1Type(klass, descriptor, cache_id) {} + : Cat1Type(klass, descriptor, cache_id) { + CheckConstructorInvariants(this); + } static const BooleanType* instance_; }; @@ -438,7 +451,9 @@ class ByteType FINAL : public Cat1Type { private: ByteType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) 
REQUIRES_SHARED(Locks::mutator_lock_) - : Cat1Type(klass, descriptor, cache_id) {} + : Cat1Type(klass, descriptor, cache_id) { + CheckConstructorInvariants(this); + } static const ByteType* instance_; }; @@ -456,7 +471,9 @@ class ShortType FINAL : public Cat1Type { private: ShortType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : Cat1Type(klass, descriptor, cache_id) {} + : Cat1Type(klass, descriptor, cache_id) { + CheckConstructorInvariants(this); + } static const ShortType* instance_; }; @@ -474,7 +491,9 @@ class CharType FINAL : public Cat1Type { private: CharType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : Cat1Type(klass, descriptor, cache_id) {} + : Cat1Type(klass, descriptor, cache_id) { + CheckConstructorInvariants(this); + } static const CharType* instance_; }; @@ -492,7 +511,9 @@ class FloatType FINAL : public Cat1Type { private: FloatType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : Cat1Type(klass, descriptor, cache_id) {} + : Cat1Type(klass, descriptor, cache_id) { + CheckConstructorInvariants(this); + } static const FloatType* instance_; }; @@ -517,7 +538,9 @@ class LongLoType FINAL : public Cat2Type { private: LongLoType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : Cat2Type(klass, descriptor, cache_id) {} + : Cat2Type(klass, descriptor, cache_id) { + CheckConstructorInvariants(this); + } static const LongLoType* instance_; }; @@ -535,7 +558,9 @@ class LongHiType FINAL : public Cat2Type { private: LongHiType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : Cat2Type(klass, descriptor, cache_id) {} + : Cat2Type(klass, descriptor, cache_id) { + CheckConstructorInvariants(this); + } static const LongHiType* instance_; }; @@ 
-554,7 +579,9 @@ class DoubleLoType FINAL : public Cat2Type { private: DoubleLoType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : Cat2Type(klass, descriptor, cache_id) {} + : Cat2Type(klass, descriptor, cache_id) { + CheckConstructorInvariants(this); + } static const DoubleLoType* instance_; }; @@ -572,7 +599,9 @@ class DoubleHiType FINAL : public Cat2Type { private: DoubleHiType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : Cat2Type(klass, descriptor, cache_id) {} + : Cat2Type(klass, descriptor, cache_id) { + CheckConstructorInvariants(this); + } static const DoubleHiType* instance_; }; @@ -637,7 +666,9 @@ class PreciseConstType FINAL : public ConstantType { public: PreciseConstType(uint32_t constant, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : ConstantType(constant, cache_id) {} + : ConstantType(constant, cache_id) { + CheckConstructorInvariants(this); + } bool IsPreciseConstant() const OVERRIDE { return true; } @@ -648,7 +679,9 @@ class PreciseConstLoType FINAL : public ConstantType { public: PreciseConstLoType(uint32_t constant, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : ConstantType(constant, cache_id) {} + : ConstantType(constant, cache_id) { + CheckConstructorInvariants(this); + } bool IsPreciseConstantLo() const OVERRIDE { return true; } std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); }; @@ -657,7 +690,9 @@ class PreciseConstHiType FINAL : public ConstantType { public: PreciseConstHiType(uint32_t constant, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : ConstantType(constant, cache_id) {} + : ConstantType(constant, cache_id) { + CheckConstructorInvariants(this); + } bool IsPreciseConstantHi() const OVERRIDE { return true; } std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); }; @@ -667,6 +702,7 @@ class ImpreciseConstType FINAL : 
public ConstantType { ImpreciseConstType(uint32_t constat, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : ConstantType(constat, cache_id) { + CheckConstructorInvariants(this); } bool IsImpreciseConstant() const OVERRIDE { return true; } std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); @@ -676,7 +712,9 @@ class ImpreciseConstLoType FINAL : public ConstantType { public: ImpreciseConstLoType(uint32_t constant, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : ConstantType(constant, cache_id) {} + : ConstantType(constant, cache_id) { + CheckConstructorInvariants(this); + } bool IsImpreciseConstantLo() const OVERRIDE { return true; } std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); }; @@ -685,7 +723,9 @@ class ImpreciseConstHiType FINAL : public ConstantType { public: ImpreciseConstHiType(uint32_t constant, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : ConstantType(constant, cache_id) {} + : ConstantType(constant, cache_id) { + CheckConstructorInvariants(this); + } bool IsImpreciseConstantHi() const OVERRIDE { return true; } std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); }; @@ -718,7 +758,9 @@ class UninitializedReferenceType FINAL : public UninitializedType { const StringPiece& descriptor, uint32_t allocation_pc, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : UninitializedType(klass, descriptor, allocation_pc, cache_id) {} + : UninitializedType(klass, descriptor, allocation_pc, cache_id) { + CheckConstructorInvariants(this); + } bool IsUninitializedReference() const OVERRIDE { return true; } @@ -735,9 +777,7 @@ class UnresolvedUninitializedRefType FINAL : public UninitializedType { uint32_t allocation_pc, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : UninitializedType(nullptr, descriptor, allocation_pc, cache_id) { - if (kIsDebugBuild) { - CheckInvariants(); - } + CheckConstructorInvariants(this); } bool 
IsUnresolvedAndUninitializedReference() const OVERRIDE { return true; } @@ -747,7 +787,7 @@ class UnresolvedUninitializedRefType FINAL : public UninitializedType { std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); private: - void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_); + void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; }; // Similar to UninitializedReferenceType but special case for the this argument @@ -759,9 +799,7 @@ class UninitializedThisReferenceType FINAL : public UninitializedType { uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : UninitializedType(klass, descriptor, 0, cache_id) { - if (kIsDebugBuild) { - CheckInvariants(); - } + CheckConstructorInvariants(this); } virtual bool IsUninitializedThisReference() const OVERRIDE { return true; } @@ -771,7 +809,7 @@ class UninitializedThisReferenceType FINAL : public UninitializedType { std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); private: - void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_); + void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; }; class UnresolvedUninitializedThisRefType FINAL : public UninitializedType { @@ -780,9 +818,7 @@ class UnresolvedUninitializedThisRefType FINAL : public UninitializedType { uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : UninitializedType(nullptr, descriptor, 0, cache_id) { - if (kIsDebugBuild) { - CheckInvariants(); - } + CheckConstructorInvariants(this); } bool IsUnresolvedAndUninitializedThisReference() const OVERRIDE { return true; } @@ -792,7 +828,7 @@ class UnresolvedUninitializedThisRefType FINAL : public UninitializedType { std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); private: - void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_); + void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; }; // A type of register holding a reference to an 
Object of type GetClass or a @@ -801,7 +837,9 @@ class ReferenceType FINAL : public RegType { public: ReferenceType(mirror::Class* klass, const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) - : RegType(klass, descriptor, cache_id) {} + : RegType(klass, descriptor, cache_id) { + CheckConstructorInvariants(this); + } bool IsReference() const OVERRIDE { return true; } @@ -848,9 +886,7 @@ class UnresolvedReferenceType FINAL : public UnresolvedType { UnresolvedReferenceType(const StringPiece& descriptor, uint16_t cache_id) REQUIRES_SHARED(Locks::mutator_lock_) : UnresolvedType(descriptor, cache_id) { - if (kIsDebugBuild) { - CheckInvariants(); - } + CheckConstructorInvariants(this); } bool IsUnresolvedReference() const OVERRIDE { return true; } @@ -860,7 +896,7 @@ class UnresolvedReferenceType FINAL : public UnresolvedType { std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); private: - void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_); + void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; }; // Type representing the super-class of an unresolved type. 
@@ -872,9 +908,7 @@ class UnresolvedSuperClass FINAL : public UnresolvedType { : UnresolvedType("", cache_id), unresolved_child_id_(child_id), reg_type_cache_(reg_type_cache) { - if (kIsDebugBuild) { - CheckInvariants(); - } + CheckConstructorInvariants(this); } bool IsUnresolvedSuperClass() const OVERRIDE { return true; } @@ -889,7 +923,7 @@ class UnresolvedSuperClass FINAL : public UnresolvedType { std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); private: - void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_); + void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; const uint16_t unresolved_child_id_; const RegTypeCache* const reg_type_cache_; @@ -925,7 +959,7 @@ class UnresolvedMergedType FINAL : public UnresolvedType { std::string Dump() const OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_); private: - void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_); + void CheckInvariants() const REQUIRES_SHARED(Locks::mutator_lock_) OVERRIDE; const RegTypeCache* const reg_type_cache_; diff --git a/runtime/verifier/verifier_deps.cc b/runtime/verifier/verifier_deps.cc index 000cf7c393..8e4c166492 100644 --- a/runtime/verifier/verifier_deps.cc +++ b/runtime/verifier/verifier_deps.cc @@ -68,13 +68,17 @@ const VerifierDeps::DexFileDeps* VerifierDeps::GetDexFileDeps(const DexFile& dex return (it == dex_deps_.end()) ? nullptr : it->second.get(); } +// Access flags that impact vdex verification. 
+static constexpr uint32_t kAccVdexAccessFlags = + kAccPublic | kAccPrivate | kAccProtected | kAccStatic | kAccInterface; + template <typename T> uint16_t VerifierDeps::GetAccessFlags(T* element) { static_assert(kAccJavaFlagsMask == 0xFFFF, "Unexpected value of a constant"); if (element == nullptr) { return VerifierDeps::kUnresolvedMarker; } else { - uint16_t access_flags = Low16Bits(element->GetAccessFlags()); + uint16_t access_flags = Low16Bits(element->GetAccessFlags()) & kAccVdexAccessFlags; CHECK_NE(access_flags, VerifierDeps::kUnresolvedMarker); return access_flags; } @@ -458,8 +462,7 @@ void VerifierDeps::AddAssignability(const DexFile& dex_file, } if (!IsInClassPath(source)) { - if (!destination->IsInterface()) { - DCHECK(!source->IsInterface()); + if (!destination->IsInterface() && !source->IsInterface()) { // Find the super class at the classpath boundary. Only that class // can change the assignability. do { diff --git a/runtime/well_known_classes.cc b/runtime/well_known_classes.cc index 2610252aa7..5aef062728 100644 --- a/runtime/well_known_classes.cc +++ b/runtime/well_known_classes.cc @@ -34,7 +34,6 @@ namespace art { -jclass WellKnownClasses::com_android_dex_Dex; jclass WellKnownClasses::dalvik_annotation_optimization_CriticalNative; jclass WellKnownClasses::dalvik_annotation_optimization_FastNative; jclass WellKnownClasses::dalvik_system_BaseDexClassLoader; @@ -80,7 +79,6 @@ jclass WellKnownClasses::libcore_util_EmptyArray; jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk; jclass WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer; -jmethodID WellKnownClasses::com_android_dex_Dex_create; jmethodID WellKnownClasses::dalvik_system_VMRuntime_runFinalization; jmethodID WellKnownClasses::java_lang_Boolean_valueOf; jmethodID WellKnownClasses::java_lang_Byte_valueOf; @@ -268,7 +266,6 @@ uint32_t WellKnownClasses::StringInitToEntryPoint(ArtMethod* string_init) { #undef STRING_INIT_LIST void WellKnownClasses::Init(JNIEnv* env) { - 
com_android_dex_Dex = CacheClass(env, "com/android/dex/Dex"); dalvik_annotation_optimization_CriticalNative = CacheClass(env, "dalvik/annotation/optimization/CriticalNative"); dalvik_annotation_optimization_FastNative = CacheClass(env, "dalvik/annotation/optimization/FastNative"); @@ -317,7 +314,6 @@ void WellKnownClasses::Init(JNIEnv* env) { org_apache_harmony_dalvik_ddmc_DdmServer = CacheClass(env, "org/apache/harmony/dalvik/ddmc/DdmServer"); dalvik_system_VMRuntime_runFinalization = CacheMethod(env, dalvik_system_VMRuntime, true, "runFinalization", "(J)V"); - com_android_dex_Dex_create = CacheMethod(env, com_android_dex_Dex, true, "create", "(Ljava/nio/ByteBuffer;)Lcom/android/dex/Dex;"); java_lang_ClassNotFoundException_init = CacheMethod(env, java_lang_ClassNotFoundException, false, "<init>", "(Ljava/lang/String;Ljava/lang/Throwable;)V"); java_lang_ClassLoader_loadClass = CacheMethod(env, java_lang_ClassLoader, false, "loadClass", "(Ljava/lang/String;)Ljava/lang/Class;"); diff --git a/runtime/well_known_classes.h b/runtime/well_known_classes.h index db8a53c44c..c18473197b 100644 --- a/runtime/well_known_classes.h +++ b/runtime/well_known_classes.h @@ -44,7 +44,6 @@ struct WellKnownClasses { static ObjPtr<mirror::Class> ToClass(jclass global_jclass) REQUIRES_SHARED(Locks::mutator_lock_); - static jclass com_android_dex_Dex; static jclass dalvik_annotation_optimization_CriticalNative; static jclass dalvik_annotation_optimization_FastNative; static jclass dalvik_system_BaseDexClassLoader; @@ -90,7 +89,6 @@ struct WellKnownClasses { static jclass org_apache_harmony_dalvik_ddmc_Chunk; static jclass org_apache_harmony_dalvik_ddmc_DdmServer; - static jmethodID com_android_dex_Dex_create; static jmethodID dalvik_system_VMRuntime_runFinalization; static jmethodID java_lang_Boolean_valueOf; static jmethodID java_lang_Byte_valueOf; diff --git a/test/021-string2/src/Main.java b/test/021-string2/src/Main.java index 0dd82abf6f..194f4a1a7d 100644 --- 
a/test/021-string2/src/Main.java +++ b/test/021-string2/src/Main.java @@ -127,6 +127,9 @@ public class Main { Assert.assertEquals("I", /* Small latin dotless i */ "\u0131".toUpperCase()); Assert.assertEquals("abc", "a\u0131c".replace('\u0131', 'b')); Assert.assertEquals("a\u0131c", "abc".replace('b', '\u0131')); + + // Regression test for scratch register exhaustion in String.equals() intrinsic on arm64. + Assert.assertFalse(result.equals("Very long constant string, so that the known constant count field cannot be embedded in a CMP immediate instruction on arm64. Since it can hold 12-bit values, optionally shifted left by 12, let's go somewhere over 2^12, i.e. 4096. That should trigger the bug with or without string compression. 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHI
JKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXY
Z+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ+/")); } public static void testCompareToAndEquals() { diff --git a/test/155-java-set-resolved-type/src/Main.java b/test/155-java-set-resolved-type/src/Main.java index 56b8c3ece9..8f79bd7ecd 100644 --- a/test/155-java-set-resolved-type/src/Main.java +++ b/test/155-java-set-resolved-type/src/Main.java @@ -57,8 +57,8 @@ public class Main { // we need to find TestInterface. clearResolvedTypes(timpl); - // Force intialization of TestClass2. This expects the interface type to be - // resolved and found through simple lookup. + // Force intialization of TestImplementation. This expects the interface type + // to be resolved and found through simple lookup. 
timpl.newInstance(); } catch (Throwable t) { t.printStackTrace(); diff --git a/test/158-app-image-class-table/expected.txt b/test/158-app-image-class-table/expected.txt new file mode 100644 index 0000000000..6a5618ebc6 --- /dev/null +++ b/test/158-app-image-class-table/expected.txt @@ -0,0 +1 @@ +JNI_OnLoad called diff --git a/test/158-app-image-class-table/info.txt b/test/158-app-image-class-table/info.txt new file mode 100644 index 0000000000..c844c8ecd6 --- /dev/null +++ b/test/158-app-image-class-table/info.txt @@ -0,0 +1,3 @@ +Regression test for app image class table being erroneously omitted +when it contains only boot image class loader classes while dex caches +were written with references to these classes. diff --git a/test/158-app-image-class-table/profile b/test/158-app-image-class-table/profile new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/158-app-image-class-table/profile diff --git a/test/158-app-image-class-table/run b/test/158-app-image-class-table/run new file mode 100644 index 0000000000..146e180000 --- /dev/null +++ b/test/158-app-image-class-table/run @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright (C) 2017 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +exec ${RUN} $@ --profile -Xcompiler-option --compiler-filter=speed-profile diff --git a/test/158-app-image-class-table/src/Main.java b/test/158-app-image-class-table/src/Main.java new file mode 100644 index 0000000000..804468fe91 --- /dev/null +++ b/test/158-app-image-class-table/src/Main.java @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + public static String TEST_NAME = "158-app-image-class-table"; + + public static void main(String[] args) { + try { + Class<?> class_loader_class = Class.forName("dalvik.system.PathClassLoader"); + System.loadLibrary(args[0]); + } catch (ClassNotFoundException e) { + usingRI = true; + // Add expected JNI_OnLoad log line to match expected.txt. + System.out.println("JNI_OnLoad called"); + } + try { + // Resolve but do not initialize TestImplementation. During the resolution, + // we see the Cloneable in the dex cache, so we do not try to look it up + // or resolve it. + Class<?> timpl = + Class.forName("TestImplementation", false, Main.class.getClassLoader()); + // Clear the dex cache resolved types to force a proper lookup the next time + // we need to find TestInterface. + clearResolvedTypes(timpl); + // Force intialization of TestImplementation. This expects the interface type + // to be resolved and found through simple lookup. 
+ timpl.newInstance(); + } catch (Throwable t) { + t.printStackTrace(); + } + } + + public static void clearResolvedTypes(Class<?> c) { + if (!usingRI) { + nativeClearResolvedTypes(c); + } + } + + private static boolean usingRI = false; + + public static native void nativeClearResolvedTypes(Class<?> c); +} diff --git a/runtime/native/java_lang_DexCache.h b/test/158-app-image-class-table/src/TestImplementation.java index b1c1f5e72c..558e58772f 100644 --- a/runtime/native/java_lang_DexCache.h +++ b/test/158-app-image-class-table/src/TestImplementation.java @@ -1,5 +1,5 @@ /* - * Copyright (C) 2014 The Android Open Source Project + * Copyright (C) 2017 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -14,15 +14,8 @@ * limitations under the License. */ -#ifndef ART_RUNTIME_NATIVE_JAVA_LANG_DEXCACHE_H_ -#define ART_RUNTIME_NATIVE_JAVA_LANG_DEXCACHE_H_ - -#include <jni.h> - -namespace art { - -void register_java_lang_DexCache(JNIEnv* env); - -} // namespace art - -#endif // ART_RUNTIME_NATIVE_JAVA_LANG_DEXCACHE_H_ +public class TestImplementation implements Cloneable { + public Object clone() { + return new TestImplementation(); + } +} diff --git a/test/642-fp-callees/expected.txt b/test/642-fp-callees/expected.txt new file mode 100644 index 0000000000..77a1486479 --- /dev/null +++ b/test/642-fp-callees/expected.txt @@ -0,0 +1,2 @@ +JNI_OnLoad called +Done diff --git a/test/642-fp-callees/fp_callees.cc b/test/642-fp-callees/fp_callees.cc new file mode 100644 index 0000000000..600f9690eb --- /dev/null +++ b/test/642-fp-callees/fp_callees.cc @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "base/casts.h" +#include "base/logging.h" +#include "jni.h" + +namespace art { + +// Make the array volatile, which is apparently making the C compiler +// use FP registers in the method below. +volatile double array[] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0 }; + +extern "C" JNIEXPORT void JNICALL Java_Main_holdFpTemporaries(JNIEnv* env, jclass cls) { + jmethodID mid = env->GetStaticMethodID(cls, "caller", "(IIJ)V"); + CHECK(mid != nullptr); + // Load values from the arrays, which will be loaded in callee-save FP registers. + double a = array[0]; + double b = array[1]; + double c = array[2]; + double d = array[3]; + double e = array[4]; + double f = array[5]; + double g = array[6]; + double h = array[7]; + double i = array[8]; + double j = array[9]; + double k = array[10]; + double l = array[11]; + env->CallStaticVoidMethod(cls, mid, 1, 1, 1L); + // Load it in a temporary to please C compiler with bit_cast. 
+ double temp = array[0]; + CHECK_EQ(bit_cast<int64_t>(a), bit_cast<int64_t>(temp)); + temp = array[1]; + CHECK_EQ(bit_cast<int64_t>(b), bit_cast<int64_t>(temp)); + temp = array[2]; + CHECK_EQ(bit_cast<int64_t>(c), bit_cast<int64_t>(temp)); + temp = array[3]; + CHECK_EQ(bit_cast<int64_t>(d), bit_cast<int64_t>(temp)); + temp = array[4]; + CHECK_EQ(bit_cast<int64_t>(e), bit_cast<int64_t>(temp)); + temp = array[5]; + CHECK_EQ(bit_cast<int64_t>(f), bit_cast<int64_t>(temp)); + temp = array[6]; + CHECK_EQ(bit_cast<int64_t>(g), bit_cast<int64_t>(temp)); + temp = array[7]; + CHECK_EQ(bit_cast<int64_t>(h), bit_cast<int64_t>(temp)); + temp = array[8]; + CHECK_EQ(bit_cast<int64_t>(i), bit_cast<int64_t>(temp)); + temp = array[9]; + CHECK_EQ(bit_cast<int64_t>(j), bit_cast<int64_t>(temp)); + temp = array[10]; + CHECK_EQ(bit_cast<int64_t>(k), bit_cast<int64_t>(temp)); + temp = array[11]; + CHECK_EQ(bit_cast<int64_t>(l), bit_cast<int64_t>(temp)); +} + +} // namespace art diff --git a/test/642-fp-callees/info.txt b/test/642-fp-callees/info.txt new file mode 100644 index 0000000000..d3e4bdac50 --- /dev/null +++ b/test/642-fp-callees/info.txt @@ -0,0 +1,2 @@ +Regression test for vixl32 backend, which used to incorrectly +use D14 as a temporary register. diff --git a/test/642-fp-callees/src/Main.java b/test/642-fp-callees/src/Main.java new file mode 100644 index 0000000000..fa57c93eda --- /dev/null +++ b/test/642-fp-callees/src/Main.java @@ -0,0 +1,34 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + public static void main(String[] args) { + System.loadLibrary(args[0]); + holdFpTemporaries(); + System.out.println("Done"); + } + + public static void caller(int a, int b, long c) { + $noinline$callee(a, b, c); + } + + // This method is "no inline", in order to generate the + // bad floating point use at the call site. + public static void $noinline$callee(int a, int b, long c) { + } + + public native static void holdFpTemporaries(); +} diff --git a/test/701-easy-div-rem/build b/test/701-easy-div-rem/build index 666fe895b5..d83ee82b47 100644 --- a/test/701-easy-div-rem/build +++ b/test/701-easy-div-rem/build @@ -21,12 +21,4 @@ set -e mkdir src python ./genMain.py -# Increase the file size limitation for classes.lst as the machine generated -# source file contains a lot of methods and is quite large. - -# Jack generates big temp files so only apply ulimit for dx. 
-if [ ${USE_JACK} = "false" ]; then - ulimit -S 4096 -fi - ./default-build diff --git a/test/901-hello-ti-agent/basics.cc b/test/901-hello-ti-agent/basics.cc index 0b17656303..91662770be 100644 --- a/test/901-hello-ti-agent/basics.cc +++ b/test/901-hello-ti-agent/basics.cc @@ -20,7 +20,7 @@ #include <stdio.h> #include <string.h> #include "base/macros.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ti-agent/common_helper.h" #include "ti-agent/common_load.h" diff --git a/test/903-hello-tagging/tagging.cc b/test/903-hello-tagging/tagging.cc index 6177263cd2..b85ed48930 100644 --- a/test/903-hello-tagging/tagging.cc +++ b/test/903-hello-tagging/tagging.cc @@ -25,7 +25,7 @@ #include "art_method-inl.h" #include "base/logging.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ti-agent/common_helper.h" #include "ti-agent/common_load.h" #include "utils.h" diff --git a/test/904-object-allocation/tracking.cc b/test/904-object-allocation/tracking.cc index 95eab0c6cc..cc6f681d79 100644 --- a/test/904-object-allocation/tracking.cc +++ b/test/904-object-allocation/tracking.cc @@ -21,7 +21,7 @@ #include "base/logging.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedLocalRef.h" #include "ScopedUtfChars.h" #include "ti-agent/common_helper.h" diff --git a/test/905-object-free/tracking_free.cc b/test/905-object-free/tracking_free.cc index 7b26d79edb..5eed4729af 100644 --- a/test/905-object-free/tracking_free.cc +++ b/test/905-object-free/tracking_free.cc @@ -21,7 +21,7 @@ #include "base/logging.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedLocalRef.h" #include "ScopedUtfChars.h" #include "ti-agent/common_helper.h" diff --git a/test/906-iterate-heap/iterate_heap.cc b/test/906-iterate-heap/iterate_heap.cc index 13c3562b60..f2532debfb 100644 --- a/test/906-iterate-heap/iterate_heap.cc +++ b/test/906-iterate-heap/iterate_heap.cc @@ -26,7 +26,7 @@ #include 
"android-base/stringprintf.h" #include "base/logging.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedPrimitiveArray.h" #include "ti-agent/common_helper.h" #include "ti-agent/common_load.h" diff --git a/test/907-get-loaded-classes/get_loaded_classes.cc b/test/907-get-loaded-classes/get_loaded_classes.cc index 5bda7ebac8..48ce2e2de1 100644 --- a/test/907-get-loaded-classes/get_loaded_classes.cc +++ b/test/907-get-loaded-classes/get_loaded_classes.cc @@ -21,7 +21,7 @@ #include "base/macros.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedLocalRef.h" #include "ScopedUtfChars.h" diff --git a/test/908-gc-start-finish/gc_callbacks.cc b/test/908-gc-start-finish/gc_callbacks.cc index 8f96ee63ef..45148f87cd 100644 --- a/test/908-gc-start-finish/gc_callbacks.cc +++ b/test/908-gc-start-finish/gc_callbacks.cc @@ -19,7 +19,7 @@ #include "base/macros.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ti-agent/common_helper.h" #include "ti-agent/common_load.h" diff --git a/test/909-attach-agent/attach.cc b/test/909-attach-agent/attach.cc index adae844ef0..67c756745f 100644 --- a/test/909-attach-agent/attach.cc +++ b/test/909-attach-agent/attach.cc @@ -20,7 +20,7 @@ #include <stdio.h> #include <string.h> #include "base/macros.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" namespace art { namespace Test909AttachAgent { diff --git a/test/910-methods/methods.cc b/test/910-methods/methods.cc index f60fabb1df..fdc4cdbe04 100644 --- a/test/910-methods/methods.cc +++ b/test/910-methods/methods.cc @@ -18,7 +18,7 @@ #include "base/macros.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedLocalRef.h" #include "ti-agent/common_helper.h" diff --git a/test/911-get-stack-trace/stack_trace.cc b/test/911-get-stack-trace/stack_trace.cc index 68f6d8dfb2..5a3a311255 100644 --- a/test/911-get-stack-trace/stack_trace.cc +++ 
b/test/911-get-stack-trace/stack_trace.cc @@ -24,7 +24,7 @@ #include "base/logging.h" #include "base/macros.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedLocalRef.h" #include "ti-agent/common_helper.h" #include "ti-agent/common_load.h" diff --git a/test/912-classes/classes.cc b/test/912-classes/classes.cc index 3ccfe86bed..5bd34f6be8 100644 --- a/test/912-classes/classes.cc +++ b/test/912-classes/classes.cc @@ -20,7 +20,7 @@ #include "class_linker.h" #include "jni.h" #include "mirror/class_loader.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "runtime.h" #include "ScopedLocalRef.h" #include "ScopedUtfChars.h" diff --git a/test/913-heaps/heaps.cc b/test/913-heaps/heaps.cc index 39fa000195..66fc7bef9a 100644 --- a/test/913-heaps/heaps.cc +++ b/test/913-heaps/heaps.cc @@ -28,7 +28,7 @@ #include "jit/jit.h" #include "jni.h" #include "native_stack_dump.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "runtime.h" #include "scoped_thread_state_change-inl.h" #include "thread-inl.h" diff --git a/test/918-fields/fields.cc b/test/918-fields/fields.cc index 7d29912f47..c659126aae 100644 --- a/test/918-fields/fields.cc +++ b/test/918-fields/fields.cc @@ -18,7 +18,7 @@ #include "base/macros.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedLocalRef.h" #include "ti-agent/common_helper.h" diff --git a/test/920-objects/objects.cc b/test/920-objects/objects.cc index 0553a9d007..ad1431ed00 100644 --- a/test/920-objects/objects.cc +++ b/test/920-objects/objects.cc @@ -18,7 +18,7 @@ #include "base/macros.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedLocalRef.h" #include "ti-agent/common_helper.h" diff --git a/test/922-properties/properties.cc b/test/922-properties/properties.cc index cb732c74f1..3fd274e9d6 100644 --- a/test/922-properties/properties.cc +++ b/test/922-properties/properties.cc @@ -18,7 +18,7 @@ #include 
"base/macros.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedUtfChars.h" #include "ti-agent/common_helper.h" diff --git a/test/923-monitors/monitors.cc b/test/923-monitors/monitors.cc index 4baa530ec2..131fc6a4d4 100644 --- a/test/923-monitors/monitors.cc +++ b/test/923-monitors/monitors.cc @@ -18,7 +18,7 @@ #include "base/macros.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedUtfChars.h" #include "ti-agent/common_helper.h" diff --git a/test/924-threads/threads.cc b/test/924-threads/threads.cc index 0380433d19..14ea5af60e 100644 --- a/test/924-threads/threads.cc +++ b/test/924-threads/threads.cc @@ -20,7 +20,7 @@ #include "base/macros.h" #include "base/logging.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedLocalRef.h" #include "ti-agent/common_helper.h" diff --git a/test/925-threadgroups/threadgroups.cc b/test/925-threadgroups/threadgroups.cc index 6c6e835dd3..2feaab079b 100644 --- a/test/925-threadgroups/threadgroups.cc +++ b/test/925-threadgroups/threadgroups.cc @@ -20,7 +20,7 @@ #include "base/macros.h" #include "base/logging.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedLocalRef.h" #include "ti-agent/common_helper.h" diff --git a/test/927-timers/timers.cc b/test/927-timers/timers.cc index 58d5c271e6..7b1d5c3f52 100644 --- a/test/927-timers/timers.cc +++ b/test/927-timers/timers.cc @@ -20,7 +20,7 @@ #include "base/logging.h" #include "base/macros.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ti-agent/common_helper.h" #include "ti-agent/common_load.h" diff --git a/test/928-jni-table/jni_table.cc b/test/928-jni-table/jni_table.cc index 5123d3a43f..b5c0efdd95 100644 --- a/test/928-jni-table/jni_table.cc +++ b/test/928-jni-table/jni_table.cc @@ -17,7 +17,7 @@ #include <stdio.h> #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include 
"base/logging.h" #include "base/macros.h" diff --git a/test/929-search/search.cc b/test/929-search/search.cc index d1c698491e..ad7a05323b 100644 --- a/test/929-search/search.cc +++ b/test/929-search/search.cc @@ -20,7 +20,7 @@ #include "base/logging.h" #include "base/macros.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedUtfChars.h" #include "ti-agent/common_helper.h" diff --git a/test/931-agent-thread/agent_thread.cc b/test/931-agent-thread/agent_thread.cc index a488d9a803..f8f9e48657 100644 --- a/test/931-agent-thread/agent_thread.cc +++ b/test/931-agent-thread/agent_thread.cc @@ -21,7 +21,7 @@ #include "base/logging.h" #include "base/macros.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "runtime.h" #include "ScopedLocalRef.h" #include "thread-inl.h" diff --git a/test/933-misc-events/misc_events.cc b/test/933-misc-events/misc_events.cc index 860d4b5e16..7043350b5a 100644 --- a/test/933-misc-events/misc_events.cc +++ b/test/933-misc-events/misc_events.cc @@ -21,7 +21,7 @@ #include "base/logging.h" #include "base/macros.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ti-agent/common_helper.h" #include "ti-agent/common_load.h" diff --git a/test/936-search-onload/search_onload.cc b/test/936-search-onload/search_onload.cc index 2286a467d3..3b19ca591d 100644 --- a/test/936-search-onload/search_onload.cc +++ b/test/936-search-onload/search_onload.cc @@ -22,7 +22,7 @@ #include "base/logging.h" #include "base/macros.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedUtfChars.h" #include "ti-agent/common_helper.h" diff --git a/test/944-transform-classloaders/classloader.cc b/test/944-transform-classloaders/classloader.cc index 5fbd8e11c9..7cb3c08dc3 100644 --- a/test/944-transform-classloaders/classloader.cc +++ b/test/944-transform-classloaders/classloader.cc @@ -16,8 +16,8 @@ #include "base/macros.h" #include "jni.h" 
+#include "jvmti.h" #include "mirror/class-inl.h" -#include "openjdkjvmti/jvmti.h" #include "ScopedLocalRef.h" #include "ti-agent/common_helper.h" diff --git a/test/945-obsolete-native/obsolete_native.cc b/test/945-obsolete-native/obsolete_native.cc index 061e7afbbc..442836b7ff 100644 --- a/test/945-obsolete-native/obsolete_native.cc +++ b/test/945-obsolete-native/obsolete_native.cc @@ -24,7 +24,7 @@ #include "base/logging.h" #include "base/macros.h" #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedLocalRef.h" #include "ti-agent/common_helper.h" #include "ti-agent/common_load.h" diff --git a/test/958-methodhandle-emulated-stackframe/build b/test/958-methodhandle-stackframe/build index a423ca6b4e..a423ca6b4e 100755 --- a/test/958-methodhandle-emulated-stackframe/build +++ b/test/958-methodhandle-stackframe/build diff --git a/test/958-methodhandle-emulated-stackframe/expected.txt b/test/958-methodhandle-stackframe/expected.txt index 5f3825962d..5f3825962d 100644 --- a/test/958-methodhandle-emulated-stackframe/expected.txt +++ b/test/958-methodhandle-stackframe/expected.txt diff --git a/test/958-methodhandle-emulated-stackframe/info.txt b/test/958-methodhandle-stackframe/info.txt index bec2324e76..bec2324e76 100644 --- a/test/958-methodhandle-emulated-stackframe/info.txt +++ b/test/958-methodhandle-stackframe/info.txt diff --git a/test/958-methodhandle-emulated-stackframe/src/Main.java b/test/958-methodhandle-stackframe/src/Main.java index f739d47d08..f739d47d08 100644 --- a/test/958-methodhandle-emulated-stackframe/src/Main.java +++ b/test/958-methodhandle-stackframe/src/Main.java diff --git a/test/961-default-iface-resolution-gen/build b/test/961-default-iface-resolution-gen/build index ccebbe4ac9..2f7e3ba553 100755 --- a/test/961-default-iface-resolution-gen/build +++ b/test/961-default-iface-resolution-gen/build @@ -17,15 +17,6 @@ # make us exit on a failure set -e -# We will be making more files than the ulimit is set to 
allow. Remove it temporarily. -OLD_ULIMIT=`ulimit -S` -ulimit -S unlimited - -restore_ulimit() { - ulimit -S "$OLD_ULIMIT" -} -trap 'restore_ulimit' ERR - if [[ $@ != *"--jvm"* ]]; then # Don't do anything with jvm # Hard-wired use of experimental jack. @@ -39,6 +30,3 @@ mkdir -p ./src ./util-src/generate_java.py ./src ./expected.txt ./default-build "$@" --experimental default-methods - -# Reset the ulimit back to its initial value -restore_ulimit diff --git a/test/964-default-iface-init-gen/build b/test/964-default-iface-init-gen/build index ccebbe4ac9..2f7e3ba553 100755 --- a/test/964-default-iface-init-gen/build +++ b/test/964-default-iface-init-gen/build @@ -17,15 +17,6 @@ # make us exit on a failure set -e -# We will be making more files than the ulimit is set to allow. Remove it temporarily. -OLD_ULIMIT=`ulimit -S` -ulimit -S unlimited - -restore_ulimit() { - ulimit -S "$OLD_ULIMIT" -} -trap 'restore_ulimit' ERR - if [[ $@ != *"--jvm"* ]]; then # Don't do anything with jvm # Hard-wired use of experimental jack. @@ -39,6 +30,3 @@ mkdir -p ./src ./util-src/generate_java.py ./src ./expected.txt ./default-build "$@" --experimental default-methods - -# Reset the ulimit back to its initial value -restore_ulimit diff --git a/test/968-default-partial-compile-gen/build b/test/968-default-partial-compile-gen/build index 1e9f8aadd5..00ccb89faf 100755 --- a/test/968-default-partial-compile-gen/build +++ b/test/968-default-partial-compile-gen/build @@ -17,15 +17,6 @@ # make us exit on a failure set -e -# We will be making more files than the ulimit is set to allow. Remove it temporarily. -OLD_ULIMIT=`ulimit -S` -ulimit -S unlimited - -restore_ulimit() { - ulimit -S "$OLD_ULIMIT" -} -trap 'restore_ulimit' ERR - # TODO: Support running with jack. 
if [[ $@ == *"--jvm"* ]]; then @@ -45,6 +36,3 @@ else # Use the default build script ./default-build "$@" "$EXTRA_ARGS" --experimental default-methods fi - -# Reset the ulimit back to its initial value -restore_ulimit diff --git a/test/970-iface-super-resolution-gen/build b/test/970-iface-super-resolution-gen/build index fd1b271c1c..7217fac601 100755 --- a/test/970-iface-super-resolution-gen/build +++ b/test/970-iface-super-resolution-gen/build @@ -17,15 +17,6 @@ # make us exit on a failure set -e -# We will be making more files than the ulimit is set to allow. Remove it temporarily. -OLD_ULIMIT=`ulimit -S` -ulimit -S unlimited - -restore_ulimit() { - ulimit -S "$OLD_ULIMIT" -} -trap 'restore_ulimit' ERR - # Should we compile with Java source code. By default we will use Smali. USES_JAVA_SOURCE="false" if [[ $@ == *"--jvm"* ]]; then @@ -50,6 +41,3 @@ else fi ./default-build "$@" --experimental default-methods - -# Reset the ulimit back to its initial value -restore_ulimit diff --git a/test/971-iface-super/build b/test/971-iface-super/build index 1e9f8aadd5..00ccb89faf 100755 --- a/test/971-iface-super/build +++ b/test/971-iface-super/build @@ -17,15 +17,6 @@ # make us exit on a failure set -e -# We will be making more files than the ulimit is set to allow. Remove it temporarily. -OLD_ULIMIT=`ulimit -S` -ulimit -S unlimited - -restore_ulimit() { - ulimit -S "$OLD_ULIMIT" -} -trap 'restore_ulimit' ERR - # TODO: Support running with jack. 
if [[ $@ == *"--jvm"* ]]; then @@ -45,6 +36,3 @@ else # Use the default build script ./default-build "$@" "$EXTRA_ARGS" --experimental default-methods fi - -# Reset the ulimit back to its initial value -restore_ulimit diff --git a/test/980-redefine-object/check b/test/980-redefine-object/check new file mode 100755 index 0000000000..987066fe15 --- /dev/null +++ b/test/980-redefine-object/check @@ -0,0 +1,20 @@ +#!/bin/bash +# +# Copyright (C) 2014 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The number of paused background threads (and therefore InterruptedExceptions) +# can change so we will just delete their lines from the log. 
+ +sed "/Object allocated of type 'Ljava\/lang\/InterruptedException;'/d" "$2" | diff --strip-trailing-cr -q "$1" - >/dev/null diff --git a/test/980-redefine-object/expected.txt b/test/980-redefine-object/expected.txt new file mode 100644 index 0000000000..6e9bce027a --- /dev/null +++ b/test/980-redefine-object/expected.txt @@ -0,0 +1,52 @@ + Initializing and loading the TestWatcher class that will (eventually) be notified of object allocations + Allocating an j.l.Object before redefining Object class + Allocating a Transform before redefining Object class + Redefining the Object class to add a hook into the <init> method +Object allocated of type 'Ljava/lang/StringBuilder;' +Object allocated of type 'Ljava/nio/HeapCharBuffer;' + Allocating an j.l.Object after redefining Object class +Object allocated of type 'Ljava/lang/Object;' +Object allocated of type 'Ljava/lang/StringBuilder;' +Object allocated of type 'Ljava/nio/HeapCharBuffer;' + Allocating a Transform after redefining Object class +Object allocated of type 'LTransform;' +Object allocated of type 'Ljava/lang/StringBuilder;' +Object allocated of type 'Ljava/nio/HeapCharBuffer;' + Allocating an int[] after redefining Object class +Object allocated of type 'Ljava/lang/StringBuilder;' +Object allocated of type 'Ljava/nio/HeapCharBuffer;' + Allocating an array list +Object allocated of type 'Ljava/util/ArrayList;' +Object allocated of type 'Ljava/lang/StringBuilder;' +Object allocated of type 'Ljava/nio/HeapCharBuffer;' + Adding a bunch of stuff to the array list +Object allocated of type 'Ljava/lang/Object;' +Object allocated of type 'Ljava/lang/Object;' +Object allocated of type 'LTransform;' +Object allocated of type 'Ljava/lang/StringBuilder;' +Object allocated of type 'Ljava/nio/HeapCharBuffer;' + Allocating a linked list +Object allocated of type 'Ljava/util/LinkedList;' +Object allocated of type 'Ljava/lang/StringBuilder;' +Object allocated of type 'Ljava/nio/HeapCharBuffer;' + Adding a bunch of stuff to 
the linked list +Object allocated of type 'Ljava/lang/Object;' +Object allocated of type 'Ljava/util/LinkedList$Node;' +Object allocated of type 'Ljava/lang/Object;' +Object allocated of type 'Ljava/util/LinkedList$Node;' +Object allocated of type 'Ljava/util/LinkedList$Node;' +Object allocated of type 'Ljava/util/LinkedList$Node;' +Object allocated of type 'Ljava/util/LinkedList$Node;' +Object allocated of type 'Ljava/util/LinkedList$Node;' +Object allocated of type 'LTransform;' +Object allocated of type 'Ljava/util/LinkedList$Node;' +Object allocated of type 'Ljava/lang/StringBuilder;' +Object allocated of type 'Ljava/nio/HeapCharBuffer;' + Throwing from down 4 stack frames +Object allocated of type 'Ljava/lang/Exception;' +Object allocated of type 'Ljava/lang/StringBuilder;' +Object allocated of type 'Ljava/nio/HeapCharBuffer;' + Exception caught. +Object allocated of type 'Ljava/lang/StringBuilder;' +Object allocated of type 'Ljava/nio/HeapCharBuffer;' + Finishing test! diff --git a/test/980-redefine-object/info.txt b/test/980-redefine-object/info.txt new file mode 100644 index 0000000000..f3e01b596d --- /dev/null +++ b/test/980-redefine-object/info.txt @@ -0,0 +1,23 @@ +Tests basic functions in the jvmti plugin. + +This tests that we are able to redefine methods/constructors on the +java.lang.Object class at runtime. + +This also (indirectly) tests that we correctly handle reading annotations on +obsolete methods. This is something that is not normally done since there is no +way to get a reference to an obsolete method outside of the runtime but some +annotations on the Object class are read by the runtime directly. + +NB This test cannot be run on the RI at the moment. + +If this test starts failing during the doCommonClassRedefinition call it is +possible that the definition of Object contained in the base64 DEX_BYTES array +has become stale and will need to be recreated. 
The only difference from the +normal Object dex bytes is that (a) it contains only the bytes of the Object +class itself, and (b) it adds an +'invoke-static {p0}, Ljava/lang/Object;->NotifyConstructed(Ljava/lang/Object;)V' +to the <init> function. + +It is also possible it could fail due to the pattern of allocations caused by +doing string concatenation or printing changing. In this case you should simply +update the expected.txt file. diff --git a/test/980-redefine-object/redefine_object.cc b/test/980-redefine-object/redefine_object.cc new file mode 100644 index 0000000000..daae08792a --- /dev/null +++ b/test/980-redefine-object/redefine_object.cc @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include <inttypes.h> +#include <iostream> + +#include "android-base/stringprintf.h" +#include "base/logging.h" +#include "base/macros.h" +#include "jni.h" +#include "jvmti.h" +#include "ScopedUtfChars.h" + +#include "ti-agent/common_helper.h" +#include "ti-agent/common_load.h" + +namespace art { +namespace Test980RedefineObjects { + +extern "C" JNIEXPORT void JNICALL Java_Main_bindFunctionsForClass( + JNIEnv* env, jclass Main_klass ATTRIBUTE_UNUSED, jclass target) { + BindFunctionsOnClass(jvmti_env, env, target); +} + +extern "C" JNIEXPORT void JNICALL Java_art_test_TestWatcher_NotifyConstructed( + JNIEnv* env, jclass TestWatcherClass ATTRIBUTE_UNUSED, jobject constructed) { + char* sig = nullptr; + char* generic_sig = nullptr; + if (JvmtiErrorToException(env, jvmti_env->GetClassSignature(env->GetObjectClass(constructed), + &sig, + &generic_sig))) { + // Exception. + return; + } + std::cout << "Object allocated of type '" << sig << "'" << std::endl; + jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(sig)); + jvmti_env->Deallocate(reinterpret_cast<unsigned char*>(generic_sig)); +} + +} // namespace Test980RedefineObjects +} // namespace art diff --git a/test/980-redefine-object/run b/test/980-redefine-object/run new file mode 100755 index 0000000000..c6e62ae6cd --- /dev/null +++ b/test/980-redefine-object/run @@ -0,0 +1,17 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +./default-run "$@" --jvmti diff --git a/test/980-redefine-object/src-ex/TestWatcher.java b/test/980-redefine-object/src-ex/TestWatcher.java new file mode 100644 index 0000000000..d15e68871c --- /dev/null +++ b/test/980-redefine-object/src-ex/TestWatcher.java @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package art.test; + +public class TestWatcher { + // NB This function is native since it is called in the Object.<init> method and so cannot cause + // any java allocations at all. The normal System.out.print* functions will cause allocations to + // occur so we cannot use them. This means the easiest way to report the object as being created + // is to go into native code and do it there. + public static native void NotifyConstructed(Object o); +} diff --git a/test/980-redefine-object/src/Main.java b/test/980-redefine-object/src/Main.java new file mode 100644 index 0000000000..348951c4ba --- /dev/null +++ b/test/980-redefine-object/src/Main.java @@ -0,0 +1,390 @@ +/* + * Copyright (C) 2017 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.util.ArrayList; +import java.util.Base64; +import java.util.LinkedList; + +public class Main { + + // TODO We should make this run on the RI. + /** + * This test cannot be run on the RI. + */ + private static final byte[] CLASS_BYTES = new byte[0]; + + // TODO It might be a good idea to replace this hard-coded Object definition with a + // retransformation based test. + /** + * Base64 encoding of the following smali file. + * + * .class public Ljava/lang/Object; + * .source "Object.java" + * # instance fields + * .field private transient shadow$_klass_:Ljava/lang/Class; + * .annotation system Ldalvik/annotation/Signature; + * value = { + * "Ljava/lang/Class", + * "<*>;" + * } + * .end annotation + * .end field + * + * .field private transient shadow$_monitor_:I + * # direct methods + * .method public constructor <init>()V + * .registers 1 + * .prologue + * invoke-static {p0}, Lart/test/TestWatcher;->NotifyConstructed(Ljava/lang/Object;)V + * return-void + * .end method + * + * .method static identityHashCode(Ljava/lang/Object;)I + * .registers 7 + * .prologue + * iget v0, p0, Ljava/lang/Object;->shadow$_monitor_:I + * const/high16 v3, -0x40000000 # -2.0f + * const/high16 v2, -0x80000000 + * const v1, 0xfffffff + * const/high16 v4, -0x40000000 # -2.0f + * and-int/2addr v4, v0 + * const/high16 v5, -0x80000000 + * if-ne v4, v5, :cond_15 + * const v4, 0xfffffff + * and-int/2addr v4, v0 + * return v4 + * :cond_15 + * invoke-static {p0}, Ljava/lang/Object;->identityHashCodeNative(Ljava/lang/Object;)I + * move-result v4 + * return v4 + 
* .end method + * + * .method private static native identityHashCodeNative(Ljava/lang/Object;)I + * .annotation build Ldalvik/annotation/optimization/FastNative; + * .end annotation + * .end method + * + * .method private native internalClone()Ljava/lang/Object; + * .annotation build Ldalvik/annotation/optimization/FastNative; + * .end annotation + * .end method + * + * + * # virtual methods + * .method protected clone()Ljava/lang/Object; + * .registers 4 + * .annotation system Ldalvik/annotation/Throws; + * value = { + * Ljava/lang/CloneNotSupportedException; + * } + * .end annotation + * + * .prologue + * instance-of v0, p0, Ljava/lang/Cloneable; + * if-nez v0, :cond_2d + * new-instance v0, Ljava/lang/CloneNotSupportedException; + * new-instance v1, Ljava/lang/StringBuilder; + * invoke-direct {v1}, Ljava/lang/StringBuilder;-><init>()V + * const-string/jumbo v2, "Class " + * invoke-virtual {v1, v2}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; + * move-result-object v1 + * invoke-virtual {p0}, Ljava/lang/Object;->getClass()Ljava/lang/Class; + * move-result-object v2 + * invoke-virtual {v2}, Ljava/lang/Class;->getName()Ljava/lang/String; + * move-result-object v2 + * invoke-virtual {v1, v2}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; + * move-result-object v1 + * const-string/jumbo v2, " doesn\'t implement Cloneable" + * invoke-virtual {v1, v2}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; + * move-result-object v1 + * invoke-virtual {v1}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String; + * move-result-object v1 + * invoke-direct {v0, v1}, Ljava/lang/CloneNotSupportedException;-><init>(Ljava/lang/String;)V + * throw v0 + * :cond_2d + * invoke-direct {p0}, Ljava/lang/Object;->internalClone()Ljava/lang/Object; + * move-result-object v0 + * return-object v0 + * .end method + * + * .method public equals(Ljava/lang/Object;)Z + * .registers 3 + * .prologue + * 
if-ne p0, p1, :cond_4 + * const/4 v0, 0x1 + * :goto_3 + * return v0 + * :cond_4 + * const/4 v0, 0x0 + * goto :goto_3 + * .end method + * + * .method protected finalize()V + * .registers 1 + * .annotation system Ldalvik/annotation/Throws; + * value = { + * Ljava/lang/Throwable; + * } + * .end annotation + * .prologue + * return-void + * .end method + * + * .method public final getClass()Ljava/lang/Class; + * .registers 2 + * .annotation system Ldalvik/annotation/Signature; + * value = { + * "()", + * "Ljava/lang/Class", + * "<*>;" + * } + * .end annotation + * .prologue + * iget-object v0, p0, Ljava/lang/Object;->shadow$_klass_:Ljava/lang/Class; + * return-object v0 + * .end method + * + * .method public hashCode()I + * .registers 2 + * .prologue + * invoke-static {p0}, Ljava/lang/Object;->identityHashCode(Ljava/lang/Object;)I + * move-result v0 + * return v0 + * .end method + * + * .method public final native notify()V + * .annotation build Ldalvik/annotation/optimization/FastNative; + * .end annotation + * .end method + * + * .method public final native notifyAll()V + * .annotation build Ldalvik/annotation/optimization/FastNative; + * .end annotation + * .end method + * + * .method public toString()Ljava/lang/String; + * .registers 3 + * .prologue + * new-instance v0, Ljava/lang/StringBuilder; + * invoke-direct {v0}, Ljava/lang/StringBuilder;-><init>()V + * invoke-virtual {p0}, Ljava/lang/Object;->getClass()Ljava/lang/Class; + * move-result-object v1 + * invoke-virtual {v1}, Ljava/lang/Class;->getName()Ljava/lang/String; + * move-result-object v1 + * invoke-virtual {v0, v1}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; + * move-result-object v0 + * const-string/jumbo v1, "@" + * invoke-virtual {v0, v1}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; + * move-result-object v0 + * invoke-virtual {p0}, Ljava/lang/Object;->hashCode()I + * move-result v1 + * invoke-static {v1}, 
Ljava/lang/Integer;->toHexString(I)Ljava/lang/String; + * move-result-object v1 + * invoke-virtual {v0, v1}, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder; + * move-result-object v0 + * invoke-virtual {v0}, Ljava/lang/StringBuilder;->toString()Ljava/lang/String; + * move-result-object v0 + * return-object v0 + * .end method + * + * .method public final native wait()V + * .annotation system Ldalvik/annotation/Throws; + * value = { + * Ljava/lang/InterruptedException; + * } + * .end annotation + * + * .annotation build Ldalvik/annotation/optimization/FastNative; + * .end annotation + * .end method + * + * .method public final wait(J)V + * .registers 4 + * .annotation system Ldalvik/annotation/Throws; + * value = { + * Ljava/lang/InterruptedException; + * } + * .end annotation + * .prologue + * const/4 v0, 0x0 + * invoke-virtual {p0, p1, p2, v0}, Ljava/lang/Object;->wait(JI)V + * return-void + * .end method + * + * .method public final native wait(JI)V + * .annotation system Ldalvik/annotation/Throws; + * value = { + * Ljava/lang/InterruptedException; + * } + * .end annotation + * + * .annotation build Ldalvik/annotation/optimization/FastNative; + * .end annotation + * .end method + */ + private static final byte[] DEX_BYTES = Base64.getDecoder().decode( + "ZGV4CjAzNQDUlMR9j03MYuOKekKs2p7zJzu2IfDb7RlMCgAAcAAAAHhWNBIAAAAAAAAAAIgJAAA6" + + "AAAAcAAAABEAAABYAQAADQAAAJwBAAACAAAAOAIAABYAAABIAgAAAQAAAPgCAAA0BwAAGAMAABgD" + + "AAA2AwAAOgMAAEADAABIAwAASwMAAFMDAABWAwAAWgMAAF0DAABgAwAAZAMAAGgDAACAAwAAnwMA" + + "ALsDAADoAwAA+gMAAA0EAAA1BAAATAQAAGEEAACDBAAAlwQAAKsEAADGBAAA3QQAAPAEAAD9BAAA" + + "AAUAAAQFAAAJBQAADQUAABAFAAAUBQAAHAUAACMFAAArBQAANQUAAD8FAABIBQAAUgUAAGQFAAB8" + + "BQAAiwUAAJUFAACnBQAAugUAAM0FAADVBQAA3QUAAOgFAADtBQAA/QUAAA8GAAAcBgAAJgYAAC0G" + + "AAAGAAAACAAAAAwAAAANAAAADgAAAA8AAAARAAAAEgAAABMAAAAUAAAAFQAAABYAAAAXAAAAGAAA" + + "ABkAAAAcAAAAIAAAAAYAAAAAAAAAAAAAAAcAAAAAAAAAPAYAAAkAAAAGAAAAAAAAAAkAAAALAAAA" + + 
"AAAAAAkAAAAMAAAAAAAAAAoAAAAMAAAARAYAAAsAAAANAAAAVAYAABwAAAAPAAAAAAAAAB0AAAAP" + + "AAAATAYAAB4AAAAPAAAANAYAAB8AAAAPAAAAPAYAAB8AAAAPAAAAVAYAACEAAAAQAAAAPAYAAAsA" + + "BgA0AAAACwAAADUAAAACAAoAGgAAAAYABAAnAAAABwALAAMAAAAJAAUANgAAAAsABwADAAAACwAD" + + "ACMAAAALAAwAJAAAAAsABwAlAAAACwACACYAAAALAAAAKAAAAAsAAQApAAAACwABACoAAAALAAMA" + + "KwAAAAsABwAxAAAACwAHADIAAAALAAQANwAAAAsABwA5AAAACwAIADkAAAALAAkAOQAAAA0ABwAD" + + "AAAADQAGACIAAAANAAQANwAAAAsAAAABAAAA/////wAAAAAbAAAA0AYAAD4JAAAAAAAAHCBkb2Vz" + + "bid0IGltcGxlbWVudCBDbG9uZWFibGUAAigpAAQ8Kj47AAY8aW5pdD4AAUAABkNsYXNzIAABSQAC" + + "SUwAAUoAAUwAAkxJAAJMTAAWTGFydC90ZXN0L1Rlc3RXYXRjaGVyOwAdTGRhbHZpay9hbm5vdGF0" + + "aW9uL1NpZ25hdHVyZTsAGkxkYWx2aWsvYW5ub3RhdGlvbi9UaHJvd3M7ACtMZGFsdmlrL2Fubm90" + + "YXRpb24vb3B0aW1pemF0aW9uL0Zhc3ROYXRpdmU7ABBMamF2YS9sYW5nL0NsYXNzABFMamF2YS9s" + + "YW5nL0NsYXNzOwAmTGphdmEvbGFuZy9DbG9uZU5vdFN1cHBvcnRlZEV4Y2VwdGlvbjsAFUxqYXZh" + + "L2xhbmcvQ2xvbmVhYmxlOwATTGphdmEvbGFuZy9JbnRlZ2VyOwAgTGphdmEvbGFuZy9JbnRlcnJ1" + + "cHRlZEV4Y2VwdGlvbjsAEkxqYXZhL2xhbmcvT2JqZWN0OwASTGphdmEvbGFuZy9TdHJpbmc7ABlM" + + "amF2YS9sYW5nL1N0cmluZ0J1aWxkZXI7ABVMamF2YS9sYW5nL1Rocm93YWJsZTsAEU5vdGlmeUNv" + + "bnN0cnVjdGVkAAtPYmplY3QuamF2YQABVgACVkoAA1ZKSQACVkwAAVoAAlpMAAZhcHBlbmQABWNs" + + "b25lAAZlcXVhbHMACGZpbmFsaXplAAhnZXRDbGFzcwAHZ2V0TmFtZQAIaGFzaENvZGUAEGlkZW50" + + "aXR5SGFzaENvZGUAFmlkZW50aXR5SGFzaENvZGVOYXRpdmUADWludGVybmFsQ2xvbmUACGxvY2tX" + + "b3JkABBsb2NrV29yZEhhc2hNYXNrABFsb2NrV29yZFN0YXRlSGFzaAARbG9ja1dvcmRTdGF0ZU1h" + + "c2sABm1pbGxpcwAGbm90aWZ5AAlub3RpZnlBbGwAA29iagAOc2hhZG93JF9rbGFzc18AEHNoYWRv" + + "dyRfbW9uaXRvcl8AC3RvSGV4U3RyaW5nAAh0b1N0cmluZwAFdmFsdWUABHdhaXQAAAIAAAABAAAA" + + "AQAAAAsAAAABAAAAAAAAAAEAAAABAAAAAQAAAAwAAgQBOBwBGAcCBAE4HAEYCgIDATgcAhcQFwIC" + + "BAE4HAEYDgAFAAIDATgcAxcBFxAXAgAAAAAAAAAAAAEAAABaBgAAAgAAAGIGAAB8BgAAAQAAAGIG" + + "AAABAAAAagYAAAEAAAB0BgAAAQAAAHwGAAABAAAAfwYAAAAAAAABAAAACgAAAAAAAAAAAAAAsAYA" + + "AAUAAACUBgAABwAAALgGAAAIAAAAyAYAAAsAAADABgAADAAAAMAGAAANAAAAwAYAAA4AAADABgAA" + + 
"EAAAAJwGAAARAAAAqAYAABIAAACcBgAAKAAHDgBwATQHDi0DAC0BLQMDMAEtAwIvATwDAS4BeFsA" + + "7AEABw5LARoPOsYArAEBNAcOAMUEAAcOAEEABw4AaAAHDgCRAgAHDgCmAwExBw5LAAAAAQABAAEA" + + "AAA4BwAABAAAAHEQAAAAAA4ABwABAAEAAAA9BwAAGgAAAFJgAQAVAwDAFQIAgBQB////DxUEAMC1" + + "BBUFAIAzVAcAFAT///8PtQQPBHEQCwAGAAoEDwQEAAEAAgAAAFkHAAAyAAAAIDAIADkAKwAiAAcA" + + "IgENAHAQEwABABsCBQAAAG4gFAAhAAwBbhAIAAMADAJuEAEAAgAMAm4gFAAhAAwBGwIAAAAAbiAU" + + "ACEADAFuEBUAAQAMAXAgAgAQACcAcBAMAAMADAARAAMAAgAAAAAAZQcAAAYAAAAzIQQAEhAPABIA" + + "KP4BAAEAAAAAAGwHAAABAAAADgAAAAIAAQAAAAAAcgcAAAMAAABUEAAAEQAAAAIAAQABAAAAdwcA" + + "AAUAAABxEAoAAQAKAA8AAAADAAEAAgAAAHwHAAApAAAAIgANAHAQEwAAAG4QCAACAAwBbhABAAEA" + + "DAFuIBQAEAAMABsBBAAAAG4gFAAQAAwAbhAJAAIACgFxEAMAAQAMAW4gFAAQAAwAbhAVAAAADAAR" + + "AAAABAADAAQAAACCBwAABQAAABIAbkASACEDDgAAAgQLAIIBAYIBBIGABIwPBgikDwGKAgABggIA" + + "BQToDwEB3BABBPgQARGMEQEBpBEEkQIAAZECAAEBwBEBkQIAARGkEgGRAgAAABAAAAAAAAAAAQAA" + + "AAAAAAABAAAAOgAAAHAAAAACAAAAEQAAAFgBAAADAAAADQAAAJwBAAAEAAAAAgAAADgCAAAFAAAA" + + "FgAAAEgCAAAGAAAAAQAAAPgCAAACIAAAOgAAABgDAAABEAAABQAAADQGAAAEIAAABgAAAFoGAAAD" + + "EAAACQAAAIwGAAAGIAAAAQAAANAGAAADIAAACQAAADgHAAABIAAACQAAAIwHAAAAIAAAAQAAAD4J" + + "AAAAEAAAAQAAAIgJAAA="); + + private static final String LISTENER_LOCATION = + System.getenv("DEX_LOCATION") + "/980-redefine-object-ex.jar"; + + public static void main(String[] args) { + doTest(); + } + + private static void ensureTestWatcherInitialized() { + try { + // Make sure the TestWatcher class can be found from the Object <init> function. + addToBootClassLoader(LISTENER_LOCATION); + // Load TestWatcher from the bootclassloader and make sure it is initialized. + Class<?> testwatcher_class = Class.forName("art.test.TestWatcher", true, null); + // Bind the native functions of testwatcher_class. 
+ bindFunctionsForClass(testwatcher_class); + } catch (Exception e) { + throw new Error("Exception while making testwatcher", e); + } + } + + // NB This function will cause 2 objects of type "Ljava/nio/HeapCharBuffer;" and + // "Ljava/nio/HeapCharBuffer;" to be allocated each time it is called. + private static void safePrintln(Object o) { + System.out.flush(); + System.out.print("\t" + o + "\n"); + System.out.flush(); + } + + private static void throwFrom(int depth) throws Exception { + if (depth <= 0) { + throw new Exception("Throwing the exception"); + } else { + throwFrom(depth - 1); + } + } + + public static void doTest() { + safePrintln("Initializing and loading the TestWatcher class that will (eventually) be " + + "notified of object allocations"); + // Make sure the TestWatcher class is initialized before we do anything else. + ensureTestWatcherInitialized(); + safePrintln("Allocating an j.l.Object before redefining Object class"); + // Make sure these aren't shown. + Object o = new Object(); + safePrintln("Allocating a Transform before redefining Object class"); + Transform t = new Transform(); + + // Redefine the Object Class. + safePrintln("Redefining the Object class to add a hook into the <init> method"); + doCommonClassRedefinition(Object.class, CLASS_BYTES, DEX_BYTES); + + safePrintln("Allocating an j.l.Object after redefining Object class"); + Object o2 = new Object(); + safePrintln("Allocating a Transform after redefining Object class"); + Transform t2 = new Transform(); + + // This shouldn't cause the Object constructor to be run. + safePrintln("Allocating an int[] after redefining Object class"); + int[] abc = new int[12]; + + // Try adding stuff to an array list. 
+ safePrintln("Allocating an array list"); + ArrayList<Object> al = new ArrayList<>(); + safePrintln("Adding a bunch of stuff to the array list"); + al.add(new Object()); + al.add(new Object()); + al.add(o2); + al.add(o); + al.add(t); + al.add(t2); + al.add(new Transform()); + + // Try adding stuff to a LinkedList + safePrintln("Allocating a linked list"); + LinkedList<Object> ll = new LinkedList<>(); + safePrintln("Adding a bunch of stuff to the linked list"); + ll.add(new Object()); + ll.add(new Object()); + ll.add(o2); + ll.add(o); + ll.add(t); + ll.add(t2); + ll.add(new Transform()); + + // Try making an exception. + safePrintln("Throwing from down 4 stack frames"); + try { + throwFrom(4); + } catch (Exception e) { + safePrintln("Exception caught."); + } + + safePrintln("Finishing test!"); + } + + private static native void addToBootClassLoader(String s); + + private static native void bindFunctionsForClass(Class<?> target); + + // Transforms the class + private static native void doCommonClassRedefinition(Class<?> target, + byte[] class_file, + byte[] dex_file); +} diff --git a/test/980-redefine-object/src/Transform.java b/test/980-redefine-object/src/Transform.java new file mode 100644 index 0000000000..23f67d96c7 --- /dev/null +++ b/test/980-redefine-object/src/Transform.java @@ -0,0 +1,17 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +class Transform { } diff --git a/test/Android.bp b/test/Android.bp index 00c890a834..4ebfd7429a 100644 --- a/test/Android.bp +++ b/test/Android.bp @@ -275,10 +275,12 @@ art_cc_defaults { "936-search-onload/search_onload.cc", "944-transform-classloaders/classloader.cc", "945-obsolete-native/obsolete_native.cc", + "980-redefine-object/redefine_object.cc", ], shared_libs: [ "libbase", ], + header_libs: ["libopenjdkjvmti_headers"], } art_cc_test_library { @@ -335,6 +337,7 @@ cc_defaults { "596-monitor-inflation/monitor_inflation.cc", "597-deopt-new-string/deopt.cc", "626-const-class-linking/clear_dex_cache_types.cc", + "642-fp-callees/fp_callees.cc", ], shared_libs: [ "libbacktrace", diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk index 01eb14eda2..2b57de679c 100644 --- a/test/Android.run-test.mk +++ b/test/Android.run-test.mk @@ -229,12 +229,14 @@ endef # name-to-var # they are rewritten. These tests use a broken class loader that tries to # register a dex file that's already registered with a different loader. # b/34193123 +# Disable 638-checker-inline-caches until b/36371709 is fixed. ART_TEST_RUN_TEST_SKIP += \ 115-native-bridge \ 153-reference-stress \ 080-oom-fragmentation \ 497-inlining-and-class-loader \ - 542-unresolved-access-check + 542-unresolved-access-check \ + 638-checker-inline-caches ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \ $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ @@ -353,8 +355,13 @@ TEST_ART_BROKEN_NO_RELOCATE_TESTS := # Temporarily disable some broken tests when forcing access checks in interpreter b/22414682 # 629 requires compilation. 
+# 030, 080 and 530: b/36377828 TEST_ART_BROKEN_INTERPRETER_ACCESS_CHECK_TESTS := \ 137-cfi \ + 030-bad-finalizer \ + 530-checker-lse \ + 530-checker-lse2 \ + 080-oom-throw \ 629-vdex-speed ifneq (,$(filter interp-ac,$(COMPILER_TYPES))) @@ -381,7 +388,7 @@ TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \ 908-gc-start-finish \ 913-heaps \ 961-default-iface-resolution-gen \ - 964-default-iface-init-gen + 964-default-iface-init-gen \ ifneq (,$(filter gcstress,$(GC_TYPES))) ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \ @@ -432,14 +439,27 @@ TEST_ART_BROKEN_FALLBACK_RUN_TESTS := \ 138-duplicate-classes-check2 \ 147-stripped-dex-fallback \ 554-jit-profile-file \ + 616-cha \ + 616-cha-abstract \ + 912-classes \ 629-vdex-speed # This test fails without an image. # 018, 961, 964, 968 often time out. b/34369284 +# 508: b/36365552 +# 597: b/36467228 TEST_ART_BROKEN_NO_IMAGE_RUN_TESTS := \ 137-cfi \ 138-duplicate-classes-check \ 018-stack-overflow \ + 476-clinit-inline-static-invoke \ + 496-checker-inlining-class-loader \ + 508-referrer-method \ + 597-deopt-new-string \ + 637-checker-throw-inline \ + 616-cha \ + 616-cha-abstract \ + 912-classes \ 961-default-iface-resolution-gen \ 964-default-iface-init \ 968-default-partial-compile-gen \ diff --git a/test/VerifierDeps/Iface.smali b/test/VerifierDeps/Iface.smali new file mode 100644 index 0000000000..8607307093 --- /dev/null +++ b/test/VerifierDeps/Iface.smali @@ -0,0 +1,18 @@ +# /* +# * Copyright (C) 2017 The Android Open Source Project +# * +# * Licensed under the Apache License, Version 2.0 (the "License"); +# * you may not use this file except in compliance with the License. 
+# * You may obtain a copy of the License at +# * +# * http://www.apache.org/licenses/LICENSE-2.0 +# * +# * Unless required by applicable law or agreed to in writing, software +# * distributed under the License is distributed on an "AS IS" BASIS, +# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# * See the License for the specific language governing permissions and +# * limitations under the License. +# */ + +.class public abstract interface LIface; +.super Ljava/lang/Object; diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar index e4e571cbcf..808e58a7bd 100755 --- a/test/etc/run-test-jar +++ b/test/etc/run-test-jar @@ -444,27 +444,11 @@ fi JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni" +COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xnorelocate" if [ "$RELOCATE" = "y" ]; then - COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xnorelocate" FLAGS="${FLAGS} -Xrelocate" - if [ "$HOST" = "y" ]; then - # Run test sets a fairly draconian ulimit that we will likely blow right over - # since we are relocating. Get the total size of the /system/framework directory - # in 512 byte blocks and set it as the ulimit. This should be more than enough - # room. - if [ ! `uname` = "Darwin" ]; then # TODO: Darwin doesn't support "du -B..." - ulimit -S $(du -c -B512 ${ANDROID_HOST_OUT}/framework 2>/dev/null | tail -1 | cut -f1) || exit 1 - fi - fi else FLAGS="$FLAGS -Xnorelocate" - COMPILE_FLAGS="${COMPILE_FLAGS} --runtime-arg -Xnorelocate" -fi - -if [ "$HOST" = "y" ]; then - # Increase ulimit to 128MB in case we are running hprof test, - # or string append test with art-debug-gc. 
- ulimit -S 128000 || exit 1 fi if [ "$HOST" = "n" ]; then diff --git a/test/knownfailures.json b/test/knownfailures.json index 50d70f18dc..2de34ca44f 100644 --- a/test/knownfailures.json +++ b/test/knownfailures.json @@ -1,12 +1,12 @@ [ { - "test": "153-reference-stress", + "tests": "153-reference-stress", "description": ["Disable 153-reference-stress temporarily until a fix", "arrives."], "bug": "http://b/33389022" }, { - "test": "080-oom-fragmentation", + "tests": "080-oom-fragmentation", "description": "Disable 080-oom-fragmentation due to flakes.", "bug": "http://b/33795328" }, @@ -21,7 +21,7 @@ "bug": "http://b/34193123" }, { - "test": "149-suspend-all-stress", + "tests": "149-suspend-all-stress", "description": "Disable 149-suspend-all-stress, its output is flaky", "bug": "http://b/28988206" }, @@ -34,13 +34,13 @@ "loaded systems."] }, { - "test": "147-stripped-dex-fallback", + "tests": "147-stripped-dex-fallback", "variant": "target", "description": ["147-stripped-dex-fallback isn't supported on device", "because --strip-dex requires the zip command."] }, { - "test": "569-checker-pattern-replacement", + "tests": "569-checker-pattern-replacement", "variant": "target", "description": ["569-checker-pattern-replacement tests behaviour", "present only on host."] @@ -54,13 +54,7 @@ "doesn't (and isn't meant to) work with --prebuild."] }, { - "test": "554-jit-profile-file", - "variant": "no-prebuild | interpreter", - "description": ["554-jit-profile-file is disabled because it needs a", - "primary oat file to know what it should save."] - }, - { - "tests": ["529-checker-unresolved", "555-checker-regression-x86const"], + "tests": ["529-checker-unresolved"], "variant": "no-prebuild", "bug": "http://b/27784033" }, @@ -73,27 +67,26 @@ { "tests": ["117-nopatchoat", "118-noimage-dex2oat", - "119-noimage-patchoat", - "554-jit-profile-file"], + "119-noimage-patchoat"], "variant": "no-relocate", "description": ["117-nopatchoat is not broken per-se it just doesn't", "work 
(and isn't meant to) without --prebuild", "--relocate"] }, { - "test": "137-cfi", + "tests": "137-cfi", "variant": "interp-ac", "description": ["Temporarily disable some broken tests when forcing", "access checks in interpreter"], "bug": "http://b/22414682" }, { - "test" : "629-vdex-speed", + "tests" : "629-vdex-speed", "variant": "interp-ac | no-dex2oat | interpreter | jit | relocate-npatchoat", "description": "629 requires compilation." }, { - "test": "137-cfi", + "tests": "137-cfi", "variant": "gcstress", "description": ["137-cfi needs to unwind a second forked process. We're", "using a primitive sleep to wait till we hope the", @@ -101,9 +94,10 @@ "slowness of gcstress makes this bad."] }, { - "test": "152-dead-large-object", + "tests": "152-dead-large-object", "variant": "gcstress", - "description": ["152-dead-large-object requires a heap larger than what gcstress uses."] + "description": ["152-dead-large-object requires a heap larger than what gcstress uses."], + "bug": "http://b/35800768" }, { "tests": ["908-gc-start-finish", @@ -114,7 +108,7 @@ "non-deterministic. Same for 913."] }, { - "test": "961-default-iface-resolution-gen", + "tests": "961-default-iface-resolution-gen", "variant": "gcstress", "description": ["961-default-iface-resolution-gen and", "964-default-iface-init-genare very long tests that", @@ -124,24 +118,25 @@ "lot."] }, { - "test": "964-default-iface-init-gen", + "tests": "964-default-iface-init-gen", "variant": "gcstress" }, { "tests": "154-gc-loop", - "variant": "gcstress | jit", - "description": ["154-gc-loop depends GC not happening too often"] + "variant": "gcstress | jit & debug", + "description": ["154-gc-loop depends GC not happening too often"], + "bug": "http://b/35917229" }, { - "test": "115-native-bridge", + "tests": "115-native-bridge", "variant": "target", "description": ["115-native-bridge setup is complicated. 
Need to", "implement it correctly for the target."] }, { - "test": "130-hprof", + "tests": "130-hprof", "variant": "target", - "desription": ["130-hprof dumps the heap and runs hprof-conv to check", + "description": ["130-hprof dumps the heap and runs hprof-conv to check", "whether the file is somewhat readable. Thi is only", "possible on the host. TODO: Turn off all the other", "combinations, this is more about testing actual ART", @@ -149,7 +144,7 @@ "complete test) JDWP must be set up."] }, { - "test": "131-structural-change", + "tests": "131-structural-change", "variant": "debug", "description": ["131 is an old test. The functionality has been", "implemented at an earlier stage and is checked", @@ -158,25 +153,19 @@ "punt to interpreter"] }, { - "test": "138-duplicate-classes-check", + "tests": "138-duplicate-classes-check", "variant": "ndebug", "description": ["Turned on for debug builds since debug builds have", "duplicate classes checks enabled"], "bug": "http://b/2133391" }, { - "test": "147-stripped-dex-fallback", + "tests": "147-stripped-dex-fallback", "variant": "no-dex2oat | no-image | relocate-npatchoat", "description": ["147-stripped-dex-fallback is disabled because it", "requires --prebuild."] }, { - "test": "554-jit-profile-file", - "variant": "no-dex2oat | no-image | relocate-npatchoat", - "description": ["554-jit-profile-file is disabled because it needs a", - "primary oat file to know what it should save."] - }, - { "tests": ["116-nodex2oat", "117-nopatchoat", "118-noimage-dex2oat", @@ -195,14 +184,14 @@ "138-duplicate-classes-check", "018-stack-overflow", "961-default-iface-resolution-gen", - "964-default-iface-init"], + "964-default-iface-init-gen"], "variant": "no-image", "description": ["This test fails without an image. 
018, 961, 964 often", "time out."], "bug": "http://b/34369284" }, { - "test": "137-cfi", + "tests": "137-cfi", "description": ["This test unrolls and expects managed frames, but", "tracing means we run the interpreter."], "variant": "trace | stream" @@ -217,7 +206,7 @@ "variant": "trace | stream" }, { - "test": "130-hprof", + "tests": "130-hprof", "description": "130 occasional timeout", "bug": "http://b/32383962", "variant": "trace | stream" @@ -238,14 +227,14 @@ "suppressed when tracing."] }, { - "test": "137-cfi", + "tests": "137-cfi", "description": ["CFI unwinding expects managed frames, and the test", "does not iterate enough to even compile. JIT also", "uses Generic JNI instead of the JNI compiler."], "variant": "interpreter | jit" }, { - "test": "906-iterate-heap", + "tests": "906-iterate-heap", "description": ["Test 906 iterates the heap filtering with different", "options. No instances should be created between those", "runs to be able to have precise checks."], @@ -273,22 +262,22 @@ "variant": "optimizing & ndebuggable | regalloc_gc & ndebuggable" }, { - "test": "596-app-images", + "tests": "596-app-images", "variant": "npictest" }, { - "test": "055-enum-performance", + "tests": "055-enum-performance", "variant": "optimizing | regalloc_gc", "description": ["055: Exceeds run time limits due to heap poisoning ", "instrumentation (on ARM and ARM64 devices)."] }, { - "test": "909-attach-agent", + "tests": "909-attach-agent", "variant": "debuggable", "description": "Tests that check semantics for a non-debuggable app." 
}, { - "test": "137-cfi", + "tests": "137-cfi", "variant": "debuggable", "description": ["The test relies on AOT code and debuggable makes us", "JIT always."] @@ -328,7 +317,7 @@ "variant": "optimizing | regalloc_gc" }, { - "test": "055-enum-performance", + "tests": "055-enum-performance", "description": ["The test tests performance which degrades during", "bisecting."], "env_vars": {"ART_TEST_BISECTION": "true"}, @@ -339,5 +328,39 @@ "641-checker-arraycopy"], "env_vars": {"ART_USE_READ_BARRIER": "true"}, "variant": "interpreter | optimizing | regalloc_gc | jit" + }, + { + "tests": ["912-classes", + "616-cha", + "616-cha-abstract"], + "bug": "http://b/36344364 http://b36344221", + "variant": "no-dex2oat | relocate-npatchoat" + }, + { + "tests": ["476-clinit-inline-static-invoke", + "496-checker-inlining-class-loader", + "508-referrer-method", + "637-checker-throw-inline"], + "bug": "http://b/36365552", + "variant": "no-image & jit" + }, + { + "tests": ["597-deopt-new-string"], + "bug": "http://b/36467228", + "variant": "no-image & jit" + }, + { + "tests": ["530-checker-lse", + "530-checker-lse2", + "030-bad-finalizer", + "080-oom-throw"], + "bug": "http://b/36377828", + "variant": "interp-ac" + }, + { + "tests": "638-checker-inline-caches", + "description": ["Disable 638-checker-inline-caches temporarily until a fix", + "arrives."], + "bug": "http://b/36371709" } ] diff --git a/test/run-test b/test/run-test index 1ac285769d..1715423a5c 100755 --- a/test/run-test +++ b/test/run-test @@ -766,27 +766,14 @@ fi run_args="${run_args} --testlib ${testlib}" -# To cause tests to fail fast, limit the file sizes created by dx, dex2oat and ART output to 2MB. -build_file_size_limit=2048 -run_file_size_limit=2048 - -# Add tests requiring a higher ulimit to this list. Ulimits might need to be raised to deal with -# large amounts of expected output or large generated files. 
-if echo "$test_dir" | grep -Eq "(083|089|961|964|971)" > /dev/null; then - build_file_size_limit=5120 - run_file_size_limit=5120 -fi -if [ "$run_checker" = "yes" -a "$target_mode" = "yes" ]; then - # We will need to `adb pull` the .cfg output from the target onto the host to - # run checker on it. This file can be big. - build_file_size_limit=32768 - run_file_size_limit=32768 -fi -if [ ${USE_JACK} = "false" ]; then - # Set ulimit if we build with dx only, Jack can generate big temp files. - if ! ulimit -S "$build_file_size_limit"; then - err_echo "ulimit file size setting failed" - fi +# To cause tests to fail fast, limit the file sizes created by dx, dex2oat and +# ART output to approximately 128MB. This should be more than sufficient +# for any test while still catching cases of runaway output. +# Set a hard limit to encourage ART developers to increase the ulimit here if +# needed to support a test case rather than resetting the limit in the run +# script for the particular test in question. +if ! ulimit -f -H 128000; then + err_echo "ulimit file size setting failed" fi good="no" @@ -797,9 +784,6 @@ if [ "$dev_mode" = "yes" ]; then build_exit="$?" echo "build exit status: $build_exit" 1>&2 if [ "$build_exit" = '0' ]; then - if ! ulimit -S "$run_file_size_limit"; then - err_echo "ulimit file size setting failed" - fi echo "${test_dir}: running..." 1>&2 "./${run}" $run_args "$@" 2>&1 run_exit="$?" @@ -825,9 +809,6 @@ elif [ "$update_mode" = "yes" ]; then "./${build}" $build_args >"$build_output" 2>&1 build_exit="$?" if [ "$build_exit" = '0' ]; then - if ! ulimit -S "$run_file_size_limit"; then - err_echo "ulimit file size setting failed" - fi echo "${test_dir}: running..." 1>&2 "./${run}" $run_args "$@" >"$output" 2>&1 if [ "$run_checker" = "yes" ]; then @@ -862,9 +843,6 @@ else "./${build}" $build_args >"$build_output" 2>&1 build_exit="$?" if [ "$build_exit" = '0' ]; then - if ! 
ulimit -S "$run_file_size_limit"; then - err_echo "ulimit file size setting failed" - fi echo "${test_dir}: running..." 1>&2 "./${run}" $run_args "$@" >"$output" 2>&1 run_exit="$?" @@ -934,9 +912,6 @@ if [ "$bisection_search" = "yes" -a "$good" != "yes" ]; then echo "${test_dir}: not bisecting, checker test." 1>&2 else # Increase file size limit, bisection search can generate large logfiles. - if ! ulimit -S unlimited; then - err_echo "ulimit file size setting failed" - fi echo "${test_dir}: bisecting..." 1>&2 cwd=`pwd` maybe_device_mode="" diff --git a/test/testrunner/env.py b/test/testrunner/env.py index ed4b4a9f3e..e93fb3afa8 100644 --- a/test/testrunner/env.py +++ b/test/testrunner/env.py @@ -1,5 +1,3 @@ -#!/usr/bin/env python -# # Copyright 2017, The Android Open Source Project # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -57,7 +55,9 @@ def dump_many_vars(var_name): "make --no-print-directory -C \"%s\" -f build/core/config.mk " "dump-many-vars DUMP_MANY_VARS=\"%s\"") % (ANDROID_BUILD_TOP, all_vars) - config = subprocess.Popen(command, stdout=subprocess.PIPE, + config = subprocess.Popen(command, + stdout=subprocess.PIPE, + universal_newlines=True, shell=True).communicate()[0] # read until EOF, select stdin # Prints out something like: # TARGET_ARCH='arm64' diff --git a/test/testrunner/run_build_test_target.py b/test/testrunner/run_build_test_target.py index 0cd1ddee7b..835b678cd6 100755 --- a/test/testrunner/run_build_test_target.py +++ b/test/testrunner/run_build_test_target.py @@ -14,15 +14,28 @@ # See the License for the specific language governing permissions and # limitations under the License. +"""Build and run go/ab/git_master-art-host target + +Provided with a target name, the script setup the environment for +building the test target by taking config information from +from target_config.py. 
+ +If the target field is defined in the configuration for the target, it +invokes `make` to build the target, otherwise, it assumes +that the its is a run-test target, and invokes testrunner.py +script for building and running the run-tests. +""" + import argparse import os import subprocess +import sys from target_config import target_config import env parser = argparse.ArgumentParser() -parser.add_argument('--build-target', required=True, dest='build_target') +parser.add_argument('build_target') parser.add_argument('-j', default='1', dest='n_threads') options = parser.parse_args() @@ -33,7 +46,6 @@ custom_env['SOONG_ALLOW_MISSING_DEPENDENCIES'] = 'true' print custom_env os.environ.update(custom_env) - if target.get('target'): build_command = 'make' build_command += ' -j' + str(n_threads) @@ -43,12 +55,13 @@ if target.get('target'): if subprocess.call(build_command.split()): sys.exit(1) -else: +if target.get('run-tests'): run_test_command = [os.path.join(env.ANDROID_BUILD_TOP, 'art/test/testrunner/testrunner.py')] run_test_command += target.get('flags', []) run_test_command += ['-j', str(n_threads)] run_test_command += ['-b'] + run_test_command += ['--host'] run_test_command += ['--verbose'] print run_test_command diff --git a/test/testrunner/target_config.py b/test/testrunner/target_config.py index 5387d6a8e8..5a6ecffd44 100644 --- a/test/testrunner/target_config.py +++ b/test/testrunner/target_config.py @@ -1,29 +1,35 @@ target_config = { 'art-test' : { + 'target' : 'test-art-host-gtest', + 'run-tests' : True, 'flags' : [], 'env' : { 'ART_USE_READ_BARRIER' : 'false' } }, 'art-interpreter' : { + 'run-tests' : True, 'flags' : ['--interpreter'], 'env' : { 'ART_USE_READ_BARRIER' : 'false' } }, 'art-interpreter-access-checks' : { + 'run-tests' : True, 'flags' : ['--interp-ac'], 'env' : { 'ART_USE_READ_BARRIER' : 'false' } }, 'art-jit' : { + 'run-tests' : True, 'flags' : ['--jit'], 'env' : { 'ART_USE_READ_BARRIER' : 'false' } }, 'art-gcstress-gcverify': { + 
'run-tests' : True, 'flags' : ['--gcstress', '--gcverify'], 'env' : { @@ -32,6 +38,7 @@ target_config = { } }, 'art-interpreter-gcstress' : { + 'run-tests' : True, 'flags': ['--interpreter', '--gcstress'], 'env' : { @@ -40,6 +47,7 @@ target_config = { } }, 'art-optimizing-gcstress' : { + 'run-tests' : True, 'flags': ['--gcstress', '--optimizing'], 'env' : { @@ -48,13 +56,16 @@ target_config = { } }, 'art-jit-gcstress' : { + 'run-tests' : True, 'flags': ['--jit', '--gcstress'], 'env' : { - 'ART_USE_READ_BARRIER' : 'false' + 'ART_USE_READ_BARRIER' : 'false', + 'ART_DEFAULT_GC_TYPE' : 'SS' } }, 'art-read-barrier' : { + 'run-tests' : True, 'flags': ['--interpreter', '--optimizing'], 'env' : { @@ -63,6 +74,7 @@ target_config = { } }, 'art-read-barrier-gcstress' : { + 'run-tests' : True, 'flags' : ['--interpreter', '--optimizing', '--gcstress'], @@ -72,6 +84,7 @@ target_config = { } }, 'art-read-barrier-table-lookup' : { + 'run-tests' : True, 'flags' : ['--interpreter', '--optimizing'], 'env' : { @@ -81,6 +94,7 @@ target_config = { } }, 'art-debug-gc' : { + 'run-tests' : True, 'flags' : ['--interpreter', '--optimizing'], 'env' : { @@ -89,6 +103,7 @@ target_config = { } }, 'art-ss-gc' : { + 'run-tests' : True, 'flags' : ['--interpreter', '--optimizing', '--jit'], @@ -98,6 +113,7 @@ target_config = { } }, 'art-gss-gc' : { + 'run-tests' : True, 'flags' : ['--interpreter', '--optimizing', '--jit'], @@ -107,6 +123,7 @@ target_config = { } }, 'art-ss-gc-tlab' : { + 'run-tests' : True, 'flags' : ['--interpreter', '--optimizing', '--jit'], @@ -117,6 +134,7 @@ target_config = { } }, 'art-gss-gc-tlab' : { + 'run-tests' : True, 'flags' : ['--interpreter', '--optimizing', '--jit'], @@ -127,12 +145,14 @@ target_config = { } }, 'art-tracing' : { + 'run-tests' : True, 'flags' : ['--trace'], 'env' : { 'ART_USE_READ_BARRIER' : 'false' } }, 'art-interpreter-tracing' : { + 'run-tests' : True, 'flags' : ['--interpreter', '--trace'], 'env' : { @@ -140,24 +160,28 @@ target_config = { } }, 
'art-forcecopy' : { + 'run-tests' : True, 'flags' : ['--forcecopy'], 'env' : { 'ART_USE_READ_BARRIER' : 'false', } }, 'art-no-prebuild' : { + 'run-tests' : True, 'flags' : ['--no-prebuild'], 'env' : { 'ART_USE_READ_BARRIER' : 'false', } }, 'art-no-image' : { + 'run-tests' : True, 'flags' : ['--no-image'], 'env' : { 'ART_USE_READ_BARRIER' : 'false', } }, 'art-interpreter-no-image' : { + 'run-tests' : True, 'flags' : ['--interpreter', '--no-image'], 'env' : { @@ -165,18 +189,21 @@ target_config = { } }, 'art-relocate-no-patchoat' : { + 'run-tests' : True, 'flags' : ['--relocate-npatchoat'], 'env' : { 'ART_USE_READ_BARRIER' : 'false', } }, 'art-no-dex2oat' : { + 'run-tests' : True, 'flags' : ['--no-dex2oat'], 'env' : { 'ART_USE_READ_BARRIER' : 'false', } }, 'art-heap-poisoning' : { + 'run-tests' : True, 'flags' : ['--interpreter', '--optimizing'], 'env' : { @@ -185,20 +212,20 @@ target_config = { } }, 'art-gtest' : { - 'target' : 'test-art-gtest', + 'target' : 'test-art-host-gtest', 'env' : { 'ART_USE_READ_BARRIER' : 'true' } }, 'art-gtest-read-barrier': { - 'target' : 'test-art-gtest', + 'target' : 'test-art-host-gtest', 'env' : { 'ART_USE_READ_BARRIER' : 'true', 'ART_HEAP_POISONING' : 'true' } }, 'art-gtest-read-barrier-table-lookup': { - 'target' : 'test-art-gtest', + 'target' : 'test-art-host-gtest', 'env': { 'ART_USE_READ_BARRIER' : 'true', 'ART_READ_BARRIER_TYPE' : 'TABLELOOKUP', @@ -206,21 +233,21 @@ target_config = { } }, 'art-gtest-ss-gc': { - 'target' : 'test-art-gtest', + 'target' : 'test-art-host-gtest', 'env': { 'ART_DEFAULT_GC_TYPE' : 'SS', 'ART_USE_READ_BARRIER' : 'false' } }, 'art-gtest-gss-gc': { - 'target' : 'test-art-gtest', + 'target' : 'test-art-host-gtest', 'env' : { 'ART_DEFAULT_GC_TYPE' : 'GSS', 'ART_USE_READ_BARRIER' : 'false' } }, 'art-gtest-ss-gc-tlab': { - 'target' : 'test-art-gtest', + 'target' : 'test-art-host-gtest', 'env': { 'ART_DEFAULT_GC_TYPE' : 'SS', 'ART_USE_TLAB' : 'true', @@ -228,13 +255,20 @@ target_config = { } }, 
'art-gtest-gss-gc-tlab': { - 'target' : 'test-art-gtest', + 'target' : 'test-art-host-gtest', 'env': { 'ART_DEFAULT_GC_TYPE' : 'GSS', 'ART_USE_TLAB' : 'true', 'ART_USE_READ_BARRIER' : 'false' } }, + 'art-gtest-debug-gc' : { + 'target' : 'test-art-host-gtest', + 'env' : { + 'ART_TEST_DEBUG_GC' : 'true', + 'ART_USE_READ_BARRIER' : 'false' + } + }, 'art-gtest-valgrind32': { 'target' : 'valgrind-test-art-host32', 'env': { diff --git a/test/testrunner/testrunner.py b/test/testrunner/testrunner.py index f77e9adb10..0b9a6e6457 100755 --- a/test/testrunner/testrunner.py +++ b/test/testrunner/testrunner.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # # Copyright 2017, The Android Open Source Project # @@ -72,6 +72,9 @@ DEBUGGABLE_TYPES = set() ADDRESS_SIZES = set() OPTIMIZING_COMPILER_TYPES = set() ADDRESS_SIZES_TARGET = {'host': set(), 'target': set()} +# timeout for individual tests. +# TODO: make it adjustable per tests and for buildbots +timeout = 3000 # 50 minutes # DISABLED_TEST_CONTAINER holds information about the disabled tests. It is a map # that has key as the test name (like 001-HelloWorld), and value as set of @@ -184,10 +187,18 @@ def setup_test_env(): if env.ART_TEST_OPTIMIZING_GRAPH_COLOR: COMPILER_TYPES.add('regalloc_gc') OPTIMIZING_COMPILER_TYPES.add('regalloc_gc') - if env.ART_TEST_OPTIMIZING or not COMPILER_TYPES: # Default + if env.ART_TEST_OPTIMIZING: COMPILER_TYPES.add('optimizing') OPTIMIZING_COMPILER_TYPES.add('optimizing') + # By default we run all 'compiler' variants. 
+ if not COMPILER_TYPES: + COMPILER_TYPES.add('optimizing') + COMPILER_TYPES.add('jit') + COMPILER_TYPES.add('interpreter') + COMPILER_TYPES.add('interp-ac') + OPTIMIZING_COMPILER_TYPES.add('optimizing') + if env.ART_TEST_RUN_TEST_RELOCATE: RELOCATE_TYPES.add('relocate') if env.ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT: @@ -443,8 +454,8 @@ def run_test(command, test, test_variant, test_name): test_skipped = True else: test_skipped = False - proc = subprocess.Popen(command.split(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE) - script_output = proc.stdout.read().strip() + proc = subprocess.Popen(command.split(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, universal_newlines=True) + script_output = proc.communicate(timeout=timeout)[0] test_passed = not proc.wait() if not test_skipped: @@ -461,9 +472,14 @@ def run_test(command, test, test_variant, test_name): skipped_tests.append(test_name) else: print_test_info(test_name, '') - except Exception, e: + except subprocess.TimeoutExpired as e: failed_tests.append(test_name) - print_text(('%s\n%s\n') % (command, str(e))) + print_test_info(test_name, 'TIMEOUT', 'timed out in %d\n%s' % ( + timeout, command)) + except Exception as e: + failed_tests.append(test_name) + print_test_info(test_name, 'FAIL') + print_text(('%s\n%s\n\n') % (command, str(e))) finally: semaphore.release() @@ -500,11 +516,11 @@ def print_test_info(test_name, result, failed_test_info=""): test_count, total_test_count) - if result == "FAIL": + if result == 'FAIL' or result == 'TIMEOUT': info += ('%s %s %s\n%s\n') % ( progress_info, test_name, - COLOR_ERROR + 'FAIL' + COLOR_NORMAL, + COLOR_ERROR + result + COLOR_NORMAL, failed_test_info) else: result_text = '' @@ -525,20 +541,35 @@ def print_test_info(test_name, result, failed_test_info=""): allowed_test_length = console_width - total_output_length test_name_len = len(test_name) if allowed_test_length < test_name_len: - test_name = ('%s...%s') % ( - test_name[:(allowed_test_length - 3)/2], - 
test_name[-(allowed_test_length - 3)/2:]) + test_name = ('...%s') % ( + test_name[-(allowed_test_length - 3):]) info += ('%s %s %s') % ( progress_info, test_name, result_text) print_text(info) - except Exception, e: + except Exception as e: print_text(('%s\n%s\n') % (test_name, str(e))) failed_tests.append(test_name) finally: print_mutex.release() +def verify_knownfailure_entry(entry): + supported_field = { + 'tests' : (list, str), + 'description' : (list, str), + 'bug' : (str,), + 'variant' : (str,), + 'env_vars' : (dict,), + } + for field in entry: + field_type = type(entry[field]) + if field_type not in supported_field[field]: + raise ValueError('%s is not supported type for %s\n%s' % ( + str(field_type), + field, + str(entry))) + def get_disabled_test_info(): """Generate set of known failures. @@ -555,15 +586,18 @@ def get_disabled_test_info(): disabled_test_info = {} for failure in known_failures_info: - tests = failure.get('test') - if tests: + verify_knownfailure_entry(failure) + tests = failure.get('tests', []) + if isinstance(tests, str): tests = [tests] - else: - tests = failure.get('tests', []) variants = parse_variants(failure.get('variant')) env_vars = failure.get('env_vars') + if check_env_vars(env_vars): for test in tests: + if test not in RUN_TEST_SET: + raise ValueError('%s is not a valid run-test' % ( + test)) if test in disabled_test_info: disabled_test_info[test] = disabled_test_info[test].union(variants) else: @@ -627,6 +661,9 @@ def parse_variants(variants): variant = set() for and_variant in and_variants: and_variant = and_variant.strip() + if and_variant not in TOTAL_VARIANTS_SET: + raise ValueError('%s is not a valid variant' % ( + and_variant)) variant.add(and_variant) variant_list.add(frozenset(variant)) return variant_list @@ -643,12 +680,25 @@ def print_analysis(): console_width = int(os.popen('stty size', 'r').read().split()[1]) eraser_text = '\r' + ' ' * console_width + '\r' print_text(eraser_text) + + # Prints information about the 
total tests run. + # E.g., "2/38 (5%) tests passed". + passed_test_count = total_test_count - len(skipped_tests) - len(failed_tests) + passed_test_information = ('%d/%d (%d%%) %s passed.\n') % ( + passed_test_count, + total_test_count, + (passed_test_count*100)/total_test_count, + 'tests' if passed_test_count > 1 else 'test') + print_text(passed_test_information) + + # Prints the list of skipped tests, if any. if skipped_tests: print_text(COLOR_SKIP + 'SKIPPED TESTS' + COLOR_NORMAL + '\n') for test in skipped_tests: print_text(test + '\n') print_text('\n') + # Prints the list of failed tests, if any. if failed_tests: print_text(COLOR_ERROR + 'FAILED TESTS' + COLOR_NORMAL + '\n') for test in failed_tests: @@ -731,10 +781,12 @@ def parse_option(): global build global gdb global gdb_arg + global timeout parser = argparse.ArgumentParser(description="Runs all or a subset of the ART test suite.") parser.add_argument('-t', '--test', dest='test', help='name of the test') parser.add_argument('-j', type=int, dest='n_thread') + parser.add_argument('--timeout', default=timeout, type=int, dest='timeout') for variant in TOTAL_VARIANTS_SET: flag = '--' + variant flag_dest = variant.replace('-', '_') @@ -842,7 +894,7 @@ def parse_option(): gdb = True if options['gdb_arg']: gdb_arg = options['gdb_arg'] - + timeout = options['timeout'] return test def main(): @@ -871,7 +923,7 @@ def main(): while threading.active_count() > 1: time.sleep(0.1) print_analysis() - except Exception, e: + except Exception as e: print_analysis() print_text(str(e)) sys.exit(1) diff --git a/test/ti-agent/common_helper.cc b/test/ti-agent/common_helper.cc index ea6359e5e0..6316a9c368 100644 --- a/test/ti-agent/common_helper.cc +++ b/test/ti-agent/common_helper.cc @@ -25,7 +25,7 @@ #include "art_method.h" #include "jni.h" #include "jni_internal.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "scoped_thread_state_change-inl.h" #include "ScopedLocalRef.h" #include "stack.h" @@ -520,11 +520,14 @@ 
void BindFunctions(jvmtiEnv* jenv, JNIEnv* env, const char* class_name) { LOG(FATAL) << "Could not load " << class_name; } } + BindFunctionsOnClass(jenv, env, klass.get()); +} +void BindFunctionsOnClass(jvmtiEnv* jenv, JNIEnv* env, jclass klass) { // Use JVMTI to get the methods. jint method_count; jmethodID* methods; - jvmtiError methods_result = jenv->GetClassMethods(klass.get(), &method_count, &methods); + jvmtiError methods_result = jenv->GetClassMethods(klass, &method_count, &methods); if (methods_result != JVMTI_ERROR_NONE) { LOG(FATAL) << "Could not get methods"; } @@ -538,7 +541,7 @@ void BindFunctions(jvmtiEnv* jenv, JNIEnv* env, const char* class_name) { } constexpr jint kNative = static_cast<jint>(kAccNative); if ((modifiers & kNative) != 0) { - BindMethod(jenv, env, klass.get(), methods[i]); + BindMethod(jenv, env, klass, methods[i]); } } diff --git a/test/ti-agent/common_helper.h b/test/ti-agent/common_helper.h index 031850147e..f10356dcbb 100644 --- a/test/ti-agent/common_helper.h +++ b/test/ti-agent/common_helper.h @@ -18,7 +18,7 @@ #define ART_TEST_TI_AGENT_COMMON_HELPER_H_ #include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" #include "ScopedLocalRef.h" namespace art { @@ -81,6 +81,7 @@ bool JvmtiErrorToException(JNIEnv* env, jvmtiError error); // // This will abort on failure. void BindFunctions(jvmtiEnv* jvmti_env, JNIEnv* env, const char* class_name); +void BindFunctionsOnClass(jvmtiEnv* jvmti_env, JNIEnv* env, jclass klass); } // namespace art diff --git a/test/ti-agent/common_load.cc b/test/ti-agent/common_load.cc index 351857d1d9..fddae3af02 100644 --- a/test/ti-agent/common_load.cc +++ b/test/ti-agent/common_load.cc @@ -18,8 +18,6 @@ #include <jni.h> #include <stdio.h> -// TODO I don't know? 
-#include "openjdkjvmti/jvmti.h" #include "art_method-inl.h" #include "base/logging.h" diff --git a/test/ti-agent/common_load.h b/test/ti-agent/common_load.h index d2544214ec..e79a0067b0 100644 --- a/test/ti-agent/common_load.h +++ b/test/ti-agent/common_load.h @@ -17,8 +17,7 @@ #ifndef ART_TEST_TI_AGENT_COMMON_LOAD_H_ #define ART_TEST_TI_AGENT_COMMON_LOAD_H_ -#include "jni.h" -#include "openjdkjvmti/jvmti.h" +#include "jvmti.h" namespace art { diff --git a/tools/golem/build-target.sh b/tools/golem/build-target.sh new file mode 100755 index 0000000000..8d8e2bbe6f --- /dev/null +++ b/tools/golem/build-target.sh @@ -0,0 +1,384 @@ +#!/bin/bash +# +# Copyright (C) 2017 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +if [[ ! -d art ]]; then + echo "Script needs to be run at the root of the android tree" + exit 1 +fi + +ALL_CONFIGS=(linux-ia32 linux-x64 linux-armv8 linux-armv7 android-armv8 android-armv7) + +usage() { + local config + local golem_target + + (cat << EOF + Usage: $(basename "${BASH_SOURCE[0]}") [--golem=<target>] --machine-type=MACHINE_TYPE + [--tarball[=<target>.tar.gz]] + + Build minimal art binaries required to run golem benchmarks either + locally or on the golem servers. + + Creates the \$MACHINE_TYPE binaries in your \$OUT_DIR, and if --tarball was specified, + it also tars the results of the build together into your <target.tar.gz> file. 
+ -------------------------------------------------------- + Required Flags: + --machine-type=MT Specify the machine type that will be built. + + Optional Flags": + --golem=<target> Builds with identical commands that Golem servers use. + --tarball[=o.tgz] Tar/gz the results. File name defaults to <machine_type>.tar.gz + -j<num> Specify how many jobs to use for parallelism. + --help Print this help listing. + --showcommands Show commands as they are being executed. + --simulate Print commands only, don't execute commands. +EOF + ) | sed -e 's/^[[:space:]][[:space:]]//g' >&2 # Strip leading whitespace from heredoc. + + echo >&2 "Available machine types:" + for config in "${ALL_CONFIGS[@]}"; do + echo >&2 " $config" + done + + echo >&2 + echo >&2 "Available Golem targets:" + while IFS='' read -r golem_target; do + echo >&2 " $golem_target" + done < <("$(thisdir)/env" --list-targets) +} + +# Check if $1 element is in array $2 +contains_element() { + local e + for e in "${@:2}"; do [[ "$e" == "$1" ]] && return 0; done + return 1 +} + +# Display a command, but don't execute it, if --showcommands was set. +show_command() { + if [[ $showcommands == "showcommands" ]]; then + echo "$@" + fi +} + +# Execute a command, displaying it if --showcommands was set. +# If --simulate is used, command is not executed. +execute() { + show_command "$@" + execute_noshow "$@" +} + +# Execute a command unless --simulate was used. +execute_noshow() { + if [[ $simulate == "simulate" ]]; then + return 0 + fi + + local prog="$1" + shift + "$prog" "$@" +} + +# Export environment variable, echoing it to screen. +setenv() { + local name="$1" + local value="$2" + + export $name="$value" + echo export $name="$value" +} + +# Export environment variable, echoing $3 to screen ($3 is meant to be unevaluated). 
+setenv_escape() { + local name="$1" + local value="$2" + local escaped_value="$3" + + export $name="$value" + echo export $name="$escaped_value" +} + +log_usage_error() { + echo >&2 "ERROR: " "$@" + echo >&2 " See --help for the correct usage information." + exit 1 +} + +log_fatal() { + echo >&2 "FATAL: " "$@" + exit 2 +} + +# Get the directory of this script. +thisdir() { + (\cd "$(dirname "${BASH_SOURCE[0]}")" && pwd ) +} + +# Get the path to the top of the Android source tree. +gettop() { + if [[ "x$ANDROID_BUILD_TOP" != "x" ]]; then + echo "$ANDROID_BUILD_TOP"; + else + echo "$(thisdir)/../../.." + fi +} + +# Get a build variable from the Android build system. +get_build_var() { + local varname="$1" + + # include the desired target product/build-variant + # which won't be set in our env if neither we nor the user first executed + # source build/envsetup.sh (e.g. if simulating from a fresh shell). + local extras + [[ -n $target_product ]] && extras+=" TARGET_PRODUCT=$target_product" + [[ -n $target_build_variant ]] && extras+=" TARGET_BUILD_VARIANT=$target_build_variant" + + # call dumpvar-$name from the makefile system. + (\cd "$(gettop)"; + CALLED_FROM_SETUP=true BUILD_SYSTEM=build/core \ + command make --no-print-directory -f build/core/config.mk \ + $extras \ + dumpvar-$varname) +} + +# Defaults from command-line. + +mode="" # blank or 'golem' if --golem was specified. +golem_target="" # --golem=$golem_target +config="" # --machine-type=$config +j_arg="-j8" +showcommands="" +simulate="" +make_tarball="" +tarball="" + +# Parse command line arguments + +while [[ "$1" != "" ]]; do + case "$1" in + --help) + usage + exit 1 + ;; + --golem=*) + mode="golem" + golem_target="${1##--golem=}" + + if [[ "x$golem_target" == x ]]; then + log_usage_error "Missing --golem target type." + fi + + shift + ;; + --machine-type=*) + config="${1##--machine-type=}" + if ! 
contains_element "$config" "${ALL_CONFIGS[@]}"; then + log_usage_error "Invalid --machine-type value '$config'" + fi + shift + ;; + --tarball) + tarball="" # reuse the machine type name. + make_tarball="make_tarball" + shift + ;; + --tarball=*) + tarball="${1##--tarball=}" + make_tarball="make_tarball" + shift + ;; + -j*) + j_arg="$1" + shift + ;; + --showcommands) + showcommands="showcommands" + shift + ;; + --simulate) + simulate="simulate" + shift + ;; + *) + log_usage_error "Unknown options $1" + ;; + esac +done + +################################### +################################### +################################### + +if [[ -z $config ]]; then + log_usage_error "--machine-type option is required." +fi + +# --tarball defaults to the --machine-type value with .tar.gz. +tarball="${tarball:-$config.tar.gz}" + +target_product="$TARGET_PRODUCT" +target_build_variant="$TARGET_BUILD_VARIANT" + +# If not using --golem, use whatever the user had lunch'd prior to this script. +if [[ $mode == "golem" ]]; then + # This section is intended solely to be executed by a golem build server. + + target_build_variant=eng + case "$config" in + *-armv7) + target_product="arm_krait" + ;; + *-armv8) + target_product="armv8" + ;; + *) + target_product="sdk" + ;; + esac + + if [[ $target_product = arm* ]]; then + # If using the regular manifest, e.g. 'master' + # The lunch command for arm will assuredly fail because we don't have device/generic/art. + # + # Print a human-readable error message instead of trying to lunch and failing there. + if ! [[ -d "$(gettop)/device/generic/art" ]]; then + log_fatal "Missing device/generic/art directory. Perhaps try master-art repo manifest?\n" \ + " Cannot build ARM targets (arm_krait, armv8) for Golem." >&2 + fi + # We could try to keep on simulating but it seems brittle because we won't have the proper + # build variables to output the right strings. + fi + + # Get this particular target's environment variables (e.g. 
ART read barrier on/off). + source "$(thisdir)"/env "$golem_target" || exit 1 + + lunch_target="$target_product-$target_build_variant" + + execute 'source' build/envsetup.sh + # Build generic targets (as opposed to something specific like aosp_angler-eng). + execute lunch "$lunch_target" + setenv JACK_SERVER false + setenv_escape JACK_REPOSITORY "$PWD/prebuilts/sdk/tools/jacks" '$PWD/prebuilts/sdk/tools/jacks' + # Golem uses master-art repository which is missing a lot of other libraries. + setenv SOONG_ALLOW_MISSING_DEPENDENCIES true + # Golem may be missing tools such as javac from its path. + setenv_escape PATH "/usr/lib/jvm/java-8-openjdk-amd64/bin/:$PATH" '/usr/lib/jvm/java-8-openjdk-amd64/bin/:$PATH' +else + # Look up the default variables from the build system if they weren't set already. + [[ -z $target_product ]] && target_product="$(get_build_var TARGET_PRODUCT)" + [[ -z $target_build_variant ]] && target_build_variant="$(get_build_var TARGET_BUILD_VARIANT)" +fi + +# Defaults for all machine types. +make_target="build-art-target-golem" +out_dir="out/x86_64" +root_dir_var="PRODUCT_OUT" +strip_symbols=false +bit64_suffix="" +tar_directories=(system data/art-test) + +# Per-machine type overrides +if [[ $config == linux-arm* ]]; then + setenv ART_TARGET_LINUX true +fi + +case "$config" in + linux-ia32|linux-x64) + root_dir_var="HOST_OUT" + # Android strips target builds automatically, but not host builds. + strip_symbols=true + make_target="build-art-host-golem" + + if [[ $config == linux-ia32 ]]; then + out_dir="out/x86" + setenv HOST_PREFER_32_BIT true + else + bit64_suffix="64" + fi + + tar_directories=(bin framework usr lib${bit64_suffix}) + ;; + *-armv8) + bit64_suffix="64" + ;; + *-armv7) + ;; + *) + log_fatal "Unsupported machine-type '$config'" +esac + +# Golem benchmark run commands expect a certain $OUT_DIR to be set, +# so specify it here. 
+# +# Note: It is questionable if we want to customize this since users +# could alternatively probably use their own build directly (and forgo this script). +setenv OUT_DIR "$out_dir" +root_dir="$(get_build_var "$root_dir_var")" + +if [[ $mode == "golem" ]]; then + # For golem-style running only. + # Sets the DT_INTERP to this path in every .so we can run the + # non-system version of dalvikvm with our own copies of the dependencies (e.g. our own libc++). + if [[ $config == android-* ]]; then + # TODO: the linker can be relative to the binaries + # (which is what we do for linux-armv8 and linux-armv7) + golem_run_path="/data/local/tmp/runner/" + else + golem_run_path="" + fi + + # Only do this for target builds. Host doesn't need this. + if [[ $config == *-arm* ]]; then + setenv CUSTOM_TARGET_LINKER "${golem_run_path}${root_dir}/system/bin/linker${bit64_suffix}" + fi +fi + +# +# Main command execution below here. +# (everything prior to this just sets up environment variables, +# and maybe calls lunch). +# + +execute make "${j_arg}" "${make_target}" + +if $strip_symbols; then + # Further reduce size by stripping symbols. + execute_noshow strip $root_dir/bin/* || true + show_command strip $root_dir/bin/'*' '|| true' + execute_noshow strip $root_dir/lib${bit64_suffix}/'*' + show_command strip $root_dir/lib${bit64_suffix}/'*' +fi + +if [[ "$make_tarball" == "make_tarball" ]]; then + # Create a tarball which is required for the golem build resource. + # (In particular, each golem benchmark's run commands depend on a list of resource files + # in order to have all the files it needs to actually execute, + # and this tarball would satisfy that particular target+machine-type's requirements). + dirs_rooted=() + for tar_dir in "${tar_directories[@]}"; do + dirs_rooted+=("$root_dir/$tar_dir") + done + + execute tar -czf "${tarball}" "${dirs_rooted[@]}" --exclude .git --exclude .gitignore + tar_result=$? 
+ if [[ $tar_result -ne 0 ]]; then + [[ -f $tarball ]] && rm $tarball + fi + + show_command '[[ $? -ne 0 ]] && rm' "$tarball" +fi + diff --git a/tools/golem/env b/tools/golem/env new file mode 100755 index 0000000000..187ba3a01f --- /dev/null +++ b/tools/golem/env @@ -0,0 +1,117 @@ +#!/bin/bash +# +# Copyright (C) 2017 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# Export some environment variables used by ART's Android.mk/Android.bp +# build systems to configure ART [to use a different implementation]. +# +# Currently only varies on ART_USE_READ_BARRIER for a concurrent/non-concurrent +# flavor of the ART garbage collector. +# +# Only meant for golem use since when building ART directly, one can/should set +# these environment flags themselves. +# +# These environment flags are not really meant here to be for "correctness", +# but rather telling the ART C++ to use alternative algorithms. +# In other words, the same exact binary build with a different "target" +# should run in the same context (e.g. it does not change arch or the OS it's built for). +# + +setenv() { + local name="$1" + local value="$2" + + export $name="$value" + echo export $name="$value" +} + +# Enforce specified target-name is one of these. +# Perhaps we should be less strict? 
+ALL_TARGETS=(art-interpreter art-opt art-jit art-jit-cc art-opt-cc art-opt-debuggable art-vdex) + +usage() { + echo >&2 "Usage: $(basename $0) (--list-targets | <target-name>)" + echo >&2 + echo >&2 "Exports the necessary ART environment variables" + echo >&2 "to pass to the Golem build to correctly configure ART." + echo >&2 "--------------------------------------------------------" + echo >&2 "Required Arguments:" + echo >&2 " <target-name> Specify the golem target to get environment variables for." + echo >&2 + echo >&2 "Optional Flags": + echo >&2 " --list-targets Display all the targets. Do not require the main target-name." + echo >&2 " --help Print this help listing." + echo >&2 + echo >&2 "Available Targets:" + + list_targets 2 " " +} + +list_targets() { + local out_fd="${1:-1}" # defaults to 1 if no param was set + local prefix="$2" + + for target in "${ALL_TARGETS[@]}"; do + echo >&$out_fd "${prefix}${target}" + done +} + + +# Check if $1 element is in array $2 +contains_element() { + local e + for e in "${@:2}"; do [[ "$e" == "$1" ]] && return 0; done + return 1 +} + +main() { + if [[ $# -lt 1 ]]; then + usage + exit 1 + fi + + if [[ "$1" == "--help" ]]; then + usage + exit 1 + fi + + if [[ "$1" == "--list-targets" ]]; then + list_targets + exit 0 + fi + + local selected_target="$1" + if ! contains_element "$selected_target" "${ALL_TARGETS[@]}"; then + echo "ERROR: Invalid target value '$selected_target'" >&2 + exit 1 + fi + + case "$selected_target" in + *-cc) + setenv ART_USE_READ_BARRIER true + ;; + *) + setenv ART_USE_READ_BARRIER false + ;; + esac + + # Make smaller .tar.gz files by excluding debug targets. + setenv ART_BUILD_TARGET_DEBUG false + setenv ART_BUILD_HOST_DEBUG false + setenv USE_DEX2OAT_DEBUG false +} + +main "$@" |