132 files changed, 3144 insertions, 1656 deletions
diff --git a/build/Android.common_path.mk b/build/Android.common_path.mk index 81cd6efbef..ecc9e76b10 100644 --- a/build/Android.common_path.mk +++ b/build/Android.common_path.mk @@ -80,7 +80,7 @@ HOST_CORE_IMG_LOCATION := $(HOST_OUT_JAVA_LIBRARIES)/core.art TARGET_CORE_IMG_LOCATION := $(ART_TARGET_TEST_OUT)/core.art # Jar files for core.art. -TARGET_CORE_JARS := core-oj core-libart conscrypt okhttp bouncycastle +TARGET_CORE_JARS := core-oj core-libart conscrypt okhttp bouncycastle apache-xml HOST_CORE_JARS := $(addsuffix -hostdex,$(TARGET_CORE_JARS)) HOST_CORE_DEX_LOCATIONS := $(foreach jar,$(HOST_CORE_JARS), $(HOST_OUT_JAVA_LIBRARIES)/$(jar).jar) diff --git a/build/Android.common_test.mk b/build/Android.common_test.mk index c9af1c67a4..ab7036717a 100644 --- a/build/Android.common_test.mk +++ b/build/Android.common_test.mk @@ -205,7 +205,7 @@ define build-art-test-dex LOCAL_DEX_PREOPT_IMAGE_LOCATION := $(TARGET_CORE_IMG_OUT) ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),) LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex - LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp + LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp -D jack.dex.output.multidex.legacy=true endif include $(BUILD_JAVA_LIBRARY) $(5) := $$(LOCAL_INSTALLED_MODULE) @@ -221,7 +221,7 @@ define build-art-test-dex LOCAL_DEX_PREOPT_IMAGE := $(HOST_CORE_IMG_LOCATION) ifneq ($(wildcard $(LOCAL_PATH)/$(2)/main.list),) LOCAL_DX_FLAGS := --multi-dex --main-dex-list=$(LOCAL_PATH)/$(2)/main.list --minimal-main-dex - LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp + LOCAL_JACK_FLAGS := -D jack.dex.output.policy=minimal-multidex -D jack.preprocessor=true -D jack.preprocessor.file=$(LOCAL_PATH)/$(2)/main.jpp -D jack.dex.output.multidex.legacy=true endif include $(BUILD_HOST_DALVIK_JAVA_LIBRARY) $(6) := $$(LOCAL_INSTALLED_MODULE) diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index 99f7a2afb8..3d16c49fe4 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -442,8 +442,8 @@ define define-art-gtest-rule-target $$(ART_TARGET_NATIVETEST_OUT)/$$(TARGET_$(2)ARCH)/$(1) \ $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libjavacore.so \ $$($(2)TARGET_OUT_SHARED_LIBRARIES)/libopenjdk.so \ - $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart.jar \ - $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj.jar + $$(TARGET_OUT_JAVA_LIBRARIES)/core-libart-testdex.jar \ + $$(TARGET_OUT_JAVA_LIBRARIES)/core-oj-testdex.jar .PHONY: $$(gtest_rule) $$(gtest_rule): test-art-target-sync diff --git a/compiler/Android.mk b/compiler/Android.mk index f0bf4997c6..458973684e 100644 --- a/compiler/Android.mk +++ b/compiler/Android.mk @@ -108,7 +108,8 @@ LIBART_COMPILER_SRC_FILES := \ elf_writer_debug.cc \ elf_writer_quick.cc \ image_writer.cc \ - oat_writer.cc + oat_writer.cc \ + profile_assistant.cc LIBART_COMPILER_SRC_FILES_arm := \ dex/quick/arm/assemble_arm.cc \ diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc index 278c49017e..b5fd1e074f 100644 --- a/compiler/common_compiler_test.cc +++ b/compiler/common_compiler_test.cc @@ -208,8 +208,8 @@ void CommonCompilerTest::CreateCompilerDriver(Compiler::Kind kind, InstructionSe false, timer_.get(), -1, - /* profile_file */ "", - /* dex_to_oat_map */ nullptr)); + /* 
dex_to_oat_map */ nullptr, + /* profile_compilation_info */ nullptr)); // We typically don't generate an image in unit tests, disable this optimization by default. compiler_driver_->SetSupportBootImageFixup(false); } diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc index bcf20c7efa..12568a4ad4 100644 --- a/compiler/dex/quick/quick_cfi_test.cc +++ b/compiler/dex/quick/quick_cfi_test.cc @@ -92,7 +92,7 @@ class QuickCFITest : public CFITest { false, 0, -1, - "", + nullptr, nullptr); ClassLinker* linker = nullptr; CompilationUnit cu(&pool, isa, &driver, linker); diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc index 9deabc02e9..b39fe4da4f 100644 --- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc +++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc @@ -73,7 +73,7 @@ class QuickAssembleX86TestBase : public testing::Test { false, 0, -1, - "", + nullptr, nullptr)); cu_.reset(new CompilationUnit(pool_.get(), isa_, compiler_driver_.get(), nullptr)); DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>( diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc index 84fb4324b5..f18fa67ea5 100644 --- a/compiler/driver/compiled_method_storage_test.cc +++ b/compiler/driver/compiled_method_storage_test.cc @@ -45,7 +45,7 @@ TEST(CompiledMethodStorage, Deduplicate) { false, nullptr, -1, - "", + nullptr, nullptr); CompiledMethodStorage* storage = driver.GetCompiledMethodStorage(); diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index afb4b71ccf..043bd93bd7 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -347,8 +347,8 @@ CompilerDriver::CompilerDriver( size_t thread_count, bool dump_stats, bool dump_passes, const std::string& dump_cfg_file_name, bool dump_cfg_append, CumulativeLogger* timer, int swap_fd, - const std::string& profile_file, - const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map) + const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map, + const ProfileCompilationInfo* profile_compilation_info) : compiler_options_(compiler_options), verification_results_(verification_results), method_inliner_map_(method_inliner_map), @@ -377,7 +377,8 @@ CompilerDriver::CompilerDriver( support_boot_image_fixup_(instruction_set != kMips && instruction_set != kMips64), dex_files_for_oat_file_(nullptr), dex_file_oat_filename_map_(dex_to_oat_map), - compiled_method_storage_(swap_fd) { + compiled_method_storage_(swap_fd), + profile_compilation_info_(profile_compilation_info) { DCHECK(compiler_options_ != nullptr); DCHECK(verification_results_ != nullptr); DCHECK(method_inliner_map_ != nullptr); @@ -385,12 +386,6 @@ CompilerDriver::CompilerDriver( compiler_->Init(); CHECK_EQ(boot_image_, image_classes_.get() != nullptr); - - // Read the profile file if one is provided. 
- if (!profile_file.empty()) { - profile_compilation_info_.reset(new ProfileCompilationInfo(profile_file)); - LOG(INFO) << "Using profile data from file " << profile_file; - } } CompilerDriver::~CompilerDriver() { @@ -2306,15 +2301,11 @@ void CompilerDriver::InitializeClasses(jobject class_loader, void CompilerDriver::Compile(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) { - if (profile_compilation_info_ != nullptr) { - if (!profile_compilation_info_->Load(dex_files)) { - LOG(WARNING) << "Failed to load offline profile info from " - << profile_compilation_info_->GetFilename() - << ". No methods will be compiled"; - } else if (kDebugProfileGuidedCompilation) { - LOG(INFO) << "[ProfileGuidedCompilation] " - << profile_compilation_info_->DumpInfo(); - } + if (kDebugProfileGuidedCompilation) { + LOG(INFO) << "[ProfileGuidedCompilation] " << + ((profile_compilation_info_ == nullptr) + ? "null" + : profile_compilation_info_->DumpInfo(&dex_files)); } for (size_t i = 0; i != dex_files.size(); ++i) { const DexFile* dex_file = dex_files[i]; diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index fa0cb9a412..3847c8183e 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -97,8 +97,8 @@ class CompilerDriver { size_t thread_count, bool dump_stats, bool dump_passes, const std::string& dump_cfg_file_name, bool dump_cfg_append, CumulativeLogger* timer, int swap_fd, - const std::string& profile_file, - const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map); + const std::unordered_map<const DexFile*, const char*>* dex_to_oat_map, + const ProfileCompilationInfo* profile_compilation_info); ~CompilerDriver(); @@ -657,9 +657,6 @@ class CompilerDriver { // This option may be restricted to the boot image, depending on a flag in the implementation. std::unique_ptr<std::unordered_set<std::string>> methods_to_compile_; - // Info for profile guided compilation. - std::unique_ptr<ProfileCompilationInfo> profile_compilation_info_; - bool had_hard_verifier_failure_; size_t thread_count_; @@ -689,6 +686,9 @@ class CompilerDriver { CompiledMethodStorage compiled_method_storage_; + // Info for profile guided compilation. + const ProfileCompilationInfo* const profile_compilation_info_; + friend class CompileClassVisitor; DISALLOW_COPY_AND_ASSIGN(CompilerDriver); }; diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h index 9ad1beefec..f8032bb514 100644 --- a/compiler/driver/compiler_options.h +++ b/compiler/driver/compiler_options.h @@ -53,7 +53,7 @@ class CompilerOptions FINAL { static const bool kDefaultGenerateDebugInfo = kIsDebugBuild; static const bool kDefaultIncludePatchInformation = false; static const size_t kDefaultInlineDepthLimit = 3; - static const size_t kDefaultInlineMaxCodeUnits = 20; + static const size_t kDefaultInlineMaxCodeUnits = 32; static constexpr size_t kUnsetInlineDepthLimit = -1; static constexpr size_t kUnsetInlineMaxCodeUnits = -1; diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index 17d0f61a34..d0bb201d69 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -76,23 +76,35 @@ static constexpr bool kBinObjects = true; // Return true if an object is already in an image space. 
bool ImageWriter::IsInBootImage(const void* obj) const { + gc::Heap* const heap = Runtime::Current()->GetHeap(); if (!compile_app_image_) { - DCHECK(boot_image_space_ == nullptr); + DCHECK(heap->GetBootImageSpaces().empty()); return false; } - const uint8_t* image_begin = boot_image_space_->Begin(); - // Real image end including ArtMethods and ArtField sections. - const uint8_t* image_end = image_begin + boot_image_space_->GetImageHeader().GetImageSize(); - return image_begin <= obj && obj < image_end; + for (gc::space::ImageSpace* boot_image_space : heap->GetBootImageSpaces()) { + const uint8_t* image_begin = boot_image_space->Begin(); + // Real image end including ArtMethods and ArtField sections. + const uint8_t* image_end = image_begin + boot_image_space->GetImageHeader().GetImageSize(); + if (image_begin <= obj && obj < image_end) { + return true; + } + } + return false; } bool ImageWriter::IsInBootOatFile(const void* ptr) const { + gc::Heap* const heap = Runtime::Current()->GetHeap(); if (!compile_app_image_) { - DCHECK(boot_image_space_ == nullptr); + DCHECK(heap->GetBootImageSpaces().empty()); return false; } - const ImageHeader& image_header = boot_image_space_->GetImageHeader(); - return image_header.GetOatFileBegin() <= ptr && ptr < image_header.GetOatFileEnd(); + for (gc::space::ImageSpace* boot_image_space : heap->GetBootImageSpaces()) { + const ImageHeader& image_header = boot_image_space->GetImageHeader(); + if (image_header.GetOatFileBegin() <= ptr && ptr < image_header.GetOatFileEnd()) { + return true; + } + } + return false; } static void CheckNoDexObjectsCallback(Object* obj, void* arg ATTRIBUTE_UNUSED) @@ -109,14 +121,6 @@ static void CheckNoDexObjects() { bool ImageWriter::PrepareImageAddressSpace() { target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet()); gc::Heap* const heap = Runtime::Current()->GetHeap(); - // Cache boot image space. - for (gc::space::ContinuousSpace* space : heap->GetContinuousSpaces()) { - if (space->IsImageSpace()) { - CHECK(compile_app_image_); - CHECK(boot_image_space_ == nullptr) << "Multiple image spaces"; - boot_image_space_ = space->AsImageSpace(); - } - } { ScopedObjectAccess soa(Thread::Current()); PruneNonImageClasses(); // Remove junk @@ -205,9 +209,6 @@ bool ImageWriter::Write(int image_fd, oat_header.GetQuickResolutionTrampolineOffset(); image_info.oat_address_offsets_[kOatAddressQuickToInterpreterBridge] = oat_header.GetQuickToInterpreterBridgeOffset(); - } else { - // Other oat files use the primary trampolines. - // TODO: Dummy values to protect usage? b/26317072 } @@ -635,11 +636,11 @@ ImageWriter::BinSlot ImageWriter::GetImageBinSlot(mirror::Object* object) const bool ImageWriter::AllocMemory() { for (const char* oat_filename : oat_filenames_) { ImageInfo& image_info = GetImageInfo(oat_filename); - const size_t length = RoundUp(image_objects_offset_begin_ + - GetBinSizeSum(image_info) + - intern_table_bytes_ + - class_table_bytes_, - kPageSize); + ImageSection unused_sections[ImageHeader::kSectionCount]; + const size_t length = RoundUp( + image_info.CreateImageSections(target_ptr_size_, unused_sections), + kPageSize); + std::string error_msg; image_info.image_.reset(MemMap::MapAnonymous("image writer image", nullptr, @@ -909,14 +910,17 @@ void ImageWriter::CalculateObjectBinSlots(Object* obj) { DCHECK(obj != nullptr); // if it is a string, we want to intern it if its not interned. 
if (obj->GetClass()->IsStringClass()) { + const char* oat_filename = GetOatFilename(obj); + ImageInfo& image_info = GetImageInfo(oat_filename); + // we must be an interned string that was forward referenced and already assigned if (IsImageBinSlotAssigned(obj)) { - DCHECK_EQ(obj, obj->AsString()->Intern()); + DCHECK_EQ(obj, image_info.intern_table_->InternStrongImageString(obj->AsString())); return; } // InternImageString allows us to intern while holding the heap bitmap lock. This is safe since // we are guaranteed to not have GC during image writing. - mirror::String* const interned = Runtime::Current()->GetInternTable()->InternStrongImageString( + mirror::String* const interned = image_info.intern_table_->InternStrongImageString( obj->AsString()); if (obj != interned) { if (!IsImageBinSlotAssigned(interned)) { @@ -1067,6 +1071,13 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) { }; const char* oat_file = GetOatFilenameForDexCache(dex_cache); ImageInfo& image_info = GetImageInfo(oat_file); + { + // Note: This table is only accessed from the image writer, so the lock is technically + // unnecessary. + WriterMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); + // Insert in the class table for this image. + image_info.class_table_->Insert(as_klass); + } for (LengthPrefixedArray<ArtField>* cur_fields : fields) { // Total array length including header. if (cur_fields != nullptr) { @@ -1249,6 +1260,18 @@ void ImageWriter::CalculateNewObjectOffsets() { // Calculate size of the dex cache arrays slot and prepare offsets. PrepareDexCacheArraySlots(); + // Calculate the sizes of the intern tables and class tables. + for (const char* oat_filename : oat_filenames_) { + ImageInfo& image_info = GetImageInfo(oat_filename); + // Calculate how big the intern table will be after being serialized. + InternTable* const intern_table = image_info.intern_table_.get(); + CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings"; + image_info.intern_table_bytes_ = intern_table->WriteToMemory(nullptr); + // Calculate the size of the class table. + ReaderMutexLock mu(self, *Locks::classlinker_classes_lock_); + image_info.class_table_bytes_ += image_info.class_table_->WriteToMemory(nullptr); + } + // Calculate bin slot offsets.
for (const char* oat_filename : oat_filenames_) { ImageInfo& image_info = GetImageInfo(oat_filename); @@ -1275,18 +1298,11 @@ void ImageWriter::CalculateNewObjectOffsets() { ImageInfo& image_info = GetImageInfo(oat_filename); image_info.image_begin_ = global_image_begin_ + image_offset; image_info.image_offset_ = image_offset; - size_t native_sections_size = image_info.bin_slot_sizes_[kBinArtField] + - image_info.bin_slot_sizes_[kBinArtMethodDirty] + - image_info.bin_slot_sizes_[kBinArtMethodClean] + - image_info.bin_slot_sizes_[kBinDexCacheArray] + - intern_table_bytes_ + - class_table_bytes_; - size_t image_objects = RoundUp(image_info.image_end_, kPageSize); - size_t bitmap_size = - RoundUp(gc::accounting::ContinuousSpaceBitmap::ComputeBitmapSize(image_objects), kPageSize); - size_t heap_size = gc::accounting::ContinuousSpaceBitmap::ComputeHeapSize(bitmap_size); - size_t max = std::max(heap_size, image_info.image_end_ + native_sections_size + bitmap_size); - image_info.image_size_ = RoundUp(max, kPageSize); + ImageSection unused_sections[ImageHeader::kSectionCount]; + image_info.image_size_ = RoundUp( + image_info.CreateImageSections(target_ptr_size_, unused_sections), + kPageSize); + // There should be no gaps until the next image. image_offset += image_info.image_size_; } @@ -1310,89 +1326,69 @@ void ImageWriter::CalculateNewObjectOffsets() { relocation.offset += image_info.bin_slot_offsets_[bin_type]; } - /* TODO: Reenable the intern table and class table. b/26317072 - // Calculate how big the intern table will be after being serialized. - InternTable* const intern_table = runtime->GetInternTable(); - CHECK_EQ(intern_table->WeakSize(), 0u) << " should have strong interned all the strings"; - intern_table_bytes_ = intern_table->WriteToMemory(nullptr); - - // Write out the class table. - ClassLinker* class_linker = runtime->GetClassLinker(); - if (boot_image_space_ == nullptr) { - // Compiling the boot image, add null class loader. - class_loaders_.insert(nullptr); - } - // class_loaders_ usually will not be empty, but may be empty if we attempt to create an image - // with no classes. - if (class_loaders_.size() == 1u) { - // Only write the class table if we have exactly one class loader. There may be cases where - // there are multiple class loaders if a class path is passed to dex2oat. - ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - for (mirror::ClassLoader* loader : class_loaders_) { - ClassTable* table = class_linker->ClassTableForClassLoader(loader); - CHECK(table != nullptr); - class_table_bytes_ += table->WriteToMemory(nullptr); - } - } - */ - // Note that image_info.image_end_ is left at end of used mirror object section. } -void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) { - CHECK_NE(0U, oat_loaded_size); - const char* oat_filename = oat_file_->GetLocation().c_str(); - ImageInfo& image_info = GetImageInfo(oat_filename); - const uint8_t* oat_file_begin = GetOatFileBegin(oat_filename); - const uint8_t* oat_file_end = oat_file_begin + oat_loaded_size; - image_info.oat_data_begin_ = const_cast<uint8_t*>(oat_file_begin) + oat_data_offset; - const uint8_t* oat_data_end = image_info.oat_data_begin_ + oat_file_->Size(); - image_info.oat_size_ = oat_file_->Size(); - - // Create the image sections. 
- ImageSection sections[ImageHeader::kSectionCount]; +size_t ImageWriter::ImageInfo::CreateImageSections(size_t target_ptr_size, + ImageSection* out_sections) const { + DCHECK(out_sections != nullptr); // Objects section - auto* objects_section = &sections[ImageHeader::kSectionObjects]; - *objects_section = ImageSection(0u, image_info.image_end_); + auto* objects_section = &out_sections[ImageHeader::kSectionObjects]; + *objects_section = ImageSection(0u, image_end_); size_t cur_pos = objects_section->End(); // Add field section. - auto* field_section = &sections[ImageHeader::kSectionArtFields]; - *field_section = ImageSection(cur_pos, image_info.bin_slot_sizes_[kBinArtField]); - CHECK_EQ(image_info.bin_slot_offsets_[kBinArtField], field_section->Offset()); + auto* field_section = &out_sections[ImageHeader::kSectionArtFields]; + *field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]); + CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset()); cur_pos = field_section->End(); // Round up to the alignment required by the method section. - cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size_)); + cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size)); // Add method section. - auto* methods_section = &sections[ImageHeader::kSectionArtMethods]; + auto* methods_section = &out_sections[ImageHeader::kSectionArtMethods]; *methods_section = ImageSection(cur_pos, - image_info.bin_slot_sizes_[kBinArtMethodClean] + - image_info.bin_slot_sizes_[kBinArtMethodDirty]); - CHECK_EQ(image_info.bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset()); + bin_slot_sizes_[kBinArtMethodClean] + + bin_slot_sizes_[kBinArtMethodDirty]); + CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset()); cur_pos = methods_section->End(); // Add dex cache arrays section. - auto* dex_cache_arrays_section = &sections[ImageHeader::kSectionDexCacheArrays]; - *dex_cache_arrays_section = ImageSection(cur_pos, image_info.bin_slot_sizes_[kBinDexCacheArray]); - CHECK_EQ(image_info.bin_slot_offsets_[kBinDexCacheArray], dex_cache_arrays_section->Offset()); + auto* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays]; + *dex_cache_arrays_section = ImageSection(cur_pos, bin_slot_sizes_[kBinDexCacheArray]); + CHECK_EQ(bin_slot_offsets_[kBinDexCacheArray], dex_cache_arrays_section->Offset()); cur_pos = dex_cache_arrays_section->End(); // Round up to the alignment the string table expects. See HashSet::WriteToMemory. cur_pos = RoundUp(cur_pos, sizeof(uint64_t)); // Calculate the size of the interned strings. - auto* interned_strings_section = &sections[ImageHeader::kSectionInternedStrings]; + auto* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings]; *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_); cur_pos = interned_strings_section->End(); // Round up to the alignment the class table expects. See HashSet::WriteToMemory. cur_pos = RoundUp(cur_pos, sizeof(uint64_t)); // Calculate the size of the class table section. - auto* class_table_section = &sections[ImageHeader::kSectionClassTable]; + auto* class_table_section = &out_sections[ImageHeader::kSectionClassTable]; *class_table_section = ImageSection(cur_pos, class_table_bytes_); cur_pos = class_table_section->End(); // Image end goes right before the start of the image bitmap.
- const size_t image_end = static_cast<uint32_t>(cur_pos); + return cur_pos; +} + +void ImageWriter::CreateHeader(size_t oat_loaded_size, size_t oat_data_offset) { + CHECK_NE(0U, oat_loaded_size); + const char* oat_filename = oat_file_->GetLocation().c_str(); + ImageInfo& image_info = GetImageInfo(oat_filename); + const uint8_t* oat_file_begin = GetOatFileBegin(oat_filename); + const uint8_t* oat_file_end = oat_file_begin + oat_loaded_size; + image_info.oat_data_begin_ = const_cast<uint8_t*>(oat_file_begin) + oat_data_offset; + const uint8_t* oat_data_end = image_info.oat_data_begin_ + oat_file_->Size(); + image_info.oat_size_ = oat_file_->Size(); + + // Create the image sections. + ImageSection sections[ImageHeader::kSectionCount]; + const size_t image_end = image_info.CreateImageSections(target_ptr_size_, sections); + // Finally bitmap section. const size_t bitmap_bytes = image_info.image_bitmap_->Size(); auto* bitmap_section = &sections[ImageHeader::kSectionImageBitmap]; - *bitmap_section = ImageSection(RoundUp(cur_pos, kPageSize), RoundUp(bitmap_bytes, kPageSize)); - cur_pos = bitmap_section->End(); + *bitmap_section = ImageSection(RoundUp(image_end, kPageSize), RoundUp(bitmap_bytes, kPageSize)); if (VLOG_IS_ON(compiler)) { LOG(INFO) << "Creating header for " << oat_filename; size_t idx = 0; @@ -1444,7 +1440,7 @@ class FixupRootVisitor : public RootVisitor { void VisitRoots(mirror::Object*** roots, size_t count, const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < count; ++i) { - *roots[i] = ImageAddress(*roots[i]); + *roots[i] = image_writer_->GetImageAddress(*roots[i]); } } @@ -1452,19 +1448,12 @@ class FixupRootVisitor : public RootVisitor { const RootInfo& info ATTRIBUTE_UNUSED) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { for (size_t i = 0; i < count; ++i) { - roots[i]->Assign(ImageAddress(roots[i]->AsMirrorPtr())); + roots[i]->Assign(image_writer_->GetImageAddress(roots[i]->AsMirrorPtr())); } } private: ImageWriter* const image_writer_; - - mirror::Object* ImageAddress(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { - const size_t offset = image_writer_->GetImageOffset(obj); - auto* const dest = reinterpret_cast<Object*>(image_writer_->global_image_begin_ + offset); - VLOG(compiler) << "Update root from " << obj << " to " << dest; - return dest; - } }; void ImageWriter::CopyAndFixupNativeData() { @@ -1536,54 +1525,48 @@ void ImageWriter::CopyAndFixupNativeData() { } FixupRootVisitor root_visitor(this); - /* TODO: Reenable the intern table and class table // Write the intern table into the image. - const ImageSection& intern_table_section = image_header->GetImageSection( - ImageHeader::kSectionInternedStrings); - Runtime* const runtime = Runtime::Current(); - InternTable* const intern_table = runtime->GetInternTable(); - uint8_t* const intern_table_memory_ptr = - image_info.image_->Begin() + intern_table_section.Offset(); - const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr); - CHECK_EQ(intern_table_bytes, intern_table_bytes_); - // Fixup the pointers in the newly written intern table to contain image addresses. - InternTable temp_intern_table; - // Note that we require that ReadFromMemory does not make an internal copy of the elements so that - // the VisitRoots() will update the memory directly rather than the copies. - // This also relies on visit roots not doing any verification which could fail after we update - // the roots to be the image addresses.
- temp_intern_table.ReadFromMemory(intern_table_memory_ptr); - CHECK_EQ(temp_intern_table.Size(), intern_table->Size()); - temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots); - + if (image_info.intern_table_bytes_ > 0) { + const ImageSection& intern_table_section = image_header->GetImageSection( + ImageHeader::kSectionInternedStrings); + InternTable* const intern_table = image_info.intern_table_.get(); + uint8_t* const intern_table_memory_ptr = + image_info.image_->Begin() + intern_table_section.Offset(); + const size_t intern_table_bytes = intern_table->WriteToMemory(intern_table_memory_ptr); + CHECK_EQ(intern_table_bytes, image_info.intern_table_bytes_); + // Fixup the pointers in the newly written intern table to contain image addresses. + InternTable temp_intern_table; + // Note that we require that ReadFromMemory does not make an internal copy of the elements so that + // the VisitRoots() will update the memory directly rather than the copies. + // This also relies on visit roots not doing any verification which could fail after we update + // the roots to be the image addresses. + temp_intern_table.AddTableFromMemory(intern_table_memory_ptr); + CHECK_EQ(temp_intern_table.Size(), intern_table->Size()); + temp_intern_table.VisitRoots(&root_visitor, kVisitRootFlagAllRoots); + } // Write the class table(s) into the image. class_table_bytes_ may be 0 if there are multiple // class loaders. Writing multiple class tables into the image is currently unsupported. - if (class_table_bytes_ > 0u) { - ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); + if (image_info.class_table_bytes_ > 0u) { const ImageSection& class_table_section = image_header->GetImageSection( ImageHeader::kSectionClassTable); uint8_t* const class_table_memory_ptr = image_info.image_->Begin() + class_table_section.Offset(); ReaderMutexLock mu(Thread::Current(), *Locks::classlinker_classes_lock_); - size_t class_table_bytes = 0; - for (mirror::ClassLoader* loader : class_loaders_) { - ClassTable* table = class_linker->ClassTableForClassLoader(loader); - CHECK(table != nullptr); - uint8_t* memory_ptr = class_table_memory_ptr + class_table_bytes; - class_table_bytes += table->WriteToMemory(memory_ptr); - // Fixup the pointers in the newly written class table to contain image addresses. See - // above comment for intern tables. - ClassTable temp_class_table; - temp_class_table.ReadFromMemory(memory_ptr); - CHECK_EQ(temp_class_table.NumZygoteClasses(), table->NumNonZygoteClasses() + - table->NumZygoteClasses()); - BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(&root_visitor, - RootInfo(kRootUnknown)); - temp_class_table.VisitRoots(buffered_visitor); - } - CHECK_EQ(class_table_bytes, class_table_bytes_); + + ClassTable* table = image_info.class_table_.get(); + CHECK(table != nullptr); + const size_t class_table_bytes = table->WriteToMemory(class_table_memory_ptr); + CHECK_EQ(class_table_bytes, image_info.class_table_bytes_); + // Fixup the pointers in the newly written class table to contain image addresses. See + // above comment for intern tables. 
+ ClassTable temp_class_table; + temp_class_table.ReadFromMemory(class_table_memory_ptr); + CHECK_EQ(temp_class_table.NumZygoteClasses(), table->NumNonZygoteClasses() + + table->NumZygoteClasses()); + BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(&root_visitor, + RootInfo(kRootUnknown)); + temp_class_table.VisitRoots(buffered_visitor); } - */ } void ImageWriter::CopyAndFixupObjects() { @@ -1991,7 +1974,7 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig, copy->SetDeclaringClass(GetImageAddress(orig->GetDeclaringClassUnchecked())); const char* oat_filename; - if (orig->IsRuntimeMethod()) { + if (orig->IsRuntimeMethod() || compile_app_image_) { oat_filename = default_oat_filename_; } else { auto it = dex_file_oat_filename_map_.find(orig->GetDexFile()); @@ -2110,7 +2093,6 @@ uint32_t ImageWriter::BinSlot::GetIndex() const { } uint8_t* ImageWriter::GetOatFileBegin(const char* oat_filename) const { - // DCHECK_GT(intern_table_bytes_, 0u); TODO: Reenable intern table and class table. uintptr_t last_image_end = 0; for (const char* oat_fn : oat_filenames_) { const ImageInfo& image_info = GetConstImageInfo(oat_fn); @@ -2197,4 +2179,37 @@ void ImageWriter::UpdateOatFile(const char* oat_filename) { } } +ImageWriter::ImageWriter( + const CompilerDriver& compiler_driver, + uintptr_t image_begin, + bool compile_pic, + bool compile_app_image, + ImageHeader::StorageMode image_storage_mode, + const std::vector<const char*> oat_filenames, + const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map) + : compiler_driver_(compiler_driver), + global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)), + image_objects_offset_begin_(0), + oat_file_(nullptr), + compile_pic_(compile_pic), + compile_app_image_(compile_app_image), + target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())), + image_method_array_(ImageHeader::kImageMethodsCount), + dirty_methods_(0u), + clean_methods_(0u), + image_storage_mode_(image_storage_mode), + dex_file_oat_filename_map_(dex_file_oat_filename_map), + oat_filenames_(oat_filenames), + default_oat_filename_(oat_filenames[0]) { + CHECK_NE(image_begin, 0U); + for (const char* oat_filename : oat_filenames) { + image_info_map_.emplace(oat_filename, ImageInfo()); + } + std::fill_n(image_methods_, arraysize(image_methods_), nullptr); +} + +ImageWriter::ImageInfo::ImageInfo() + : intern_table_(new InternTable), + class_table_(new ClassTable) {} + } // namespace art diff --git a/compiler/image_writer.h b/compiler/image_writer.h index 78297ae645..ad690389e9 100644 --- a/compiler/image_writer.h +++ b/compiler/image_writer.h @@ -47,6 +47,8 @@ class ImageSpace; } // namespace space } // namespace gc +class ClassTable; + static constexpr int kInvalidImageFd = -1; // Write a Space built during compilation for use during execution. 
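The intern table and class table hunks above rely on a measure-then-write protocol: during layout, `WriteToMemory(nullptr)` returns the byte count used to size the `kSectionInternedStrings` and `kSectionClassTable` sections, and at write time the same call against the mapped section must return the identical count, which is what the `CHECK_EQ`s assert. A minimal, self-contained sketch of that contract; `ToyTable` is an illustrative stand-in, not ART's `InternTable` or `ClassTable`:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

class ToyTable {
 public:
  void Insert(uint32_t value) { values_.push_back(value); }

  // Mirrors the contract the diff relies on: with a null pointer, only
  // compute and return the serialized size; with a real pointer, serialize
  // into it and return the number of bytes written.
  size_t WriteToMemory(uint8_t* ptr) const {
    const size_t bytes = sizeof(uint32_t) * (1 + values_.size());
    if (ptr != nullptr) {
      const uint32_t count = static_cast<uint32_t>(values_.size());
      std::memcpy(ptr, &count, sizeof(count));
      if (!values_.empty()) {
        std::memcpy(ptr + sizeof(count), values_.data(), sizeof(uint32_t) * values_.size());
      }
    }
    return bytes;
  }

 private:
  std::vector<uint32_t> values_;
};

int main() {
  ToyTable table;
  table.Insert(1);
  table.Insert(2);
  // Pass 1, at layout time: size the image section (cf. intern_table_bytes_).
  const size_t section_bytes = table.WriteToMemory(nullptr);
  std::vector<uint8_t> image_section(section_bytes);
  // Pass 2, at write time: serialize and verify both passes agree,
  // as the CHECK_EQ(intern_table_bytes, ...) above does.
  assert(table.WriteToMemory(image_section.data()) == section_bytes);
  return 0;
}
```

The same measure-then-write shape is what lets `CreateImageSections()` serve double duty in this change: sizing the anonymous mapping in `AllocMemory()` and laying out the final sections in `CreateHeader()`.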
@@ -58,33 +60,7 @@ class ImageWriter FINAL { bool compile_app_image, ImageHeader::StorageMode image_storage_mode, const std::vector<const char*> oat_filenames, - const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map) - : compiler_driver_(compiler_driver), - global_image_begin_(reinterpret_cast<uint8_t*>(image_begin)), - image_objects_offset_begin_(0), - oat_file_(nullptr), - compile_pic_(compile_pic), - compile_app_image_(compile_app_image), - boot_image_space_(nullptr), - target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())), - intern_table_bytes_(0u), - image_method_array_(ImageHeader::kImageMethodsCount), - dirty_methods_(0u), - clean_methods_(0u), - class_table_bytes_(0u), - image_storage_mode_(image_storage_mode), - dex_file_oat_filename_map_(dex_file_oat_filename_map), - oat_filenames_(oat_filenames), - default_oat_filename_(oat_filenames[0]) { - CHECK_NE(image_begin, 0U); - for (const char* oat_filename : oat_filenames) { - image_info_map_.emplace(oat_filename, ImageInfo()); - } - std::fill_n(image_methods_, arraysize(image_methods_), nullptr); - } - - ~ImageWriter() { - } + const std::unordered_map<const DexFile*, const char*>& dex_file_oat_filename_map); bool PrepareImageAddressSpace(); @@ -237,41 +213,40 @@ class ImageWriter FINAL { }; struct ImageInfo { - explicit ImageInfo() - : image_begin_(nullptr), - image_end_(RoundUp(sizeof(ImageHeader), kObjectAlignment)), - image_roots_address_(0), - image_offset_(0), - image_size_(0), - oat_offset_(0), - bin_slot_sizes_(), - bin_slot_offsets_(), - bin_slot_count_() {} + ImageInfo(); + ImageInfo(ImageInfo&&) = default; + + // Create the image sections into the out sections variable, returns the size of the image + // excluding the bitmap. + size_t CreateImageSections(size_t target_ptr_size, ImageSection* out_sections) const; std::unique_ptr<MemMap> image_; // Memory mapped for generating the image. // Target begin of this image. Notes: It is not valid to write here, this is the address // of the target image, not necessarily where image_ is mapped. The address is only valid // after layouting (otherwise null). - uint8_t* image_begin_; + uint8_t* image_begin_ = nullptr; - size_t image_end_; // Offset to the free space in image_, initially size of image header. - uint32_t image_roots_address_; // The image roots address in the image. - size_t image_offset_; // Offset of this image from the start of the first image. + // Offset to the free space in image_, initially size of image header. + size_t image_end_ = RoundUp(sizeof(ImageHeader), kObjectAlignment); + uint32_t image_roots_address_ = 0; // The image roots address in the image. + size_t image_offset_ = 0; // Offset of this image from the start of the first image. // Image size is the *address space* covered by this image. As the live bitmap is aligned // to the page size, the live bitmap will cover more address space than necessary. But live // bitmaps may not overlap, so an image has a "shadow," which is accounted for in the size. // The next image may only start at image_begin_ + image_size_ (which is guaranteed to be // page-aligned). - size_t image_size_; + size_t image_size_ = 0; // Oat data. - size_t oat_offset_; // Offset of the oat file for this image from start of oat files. This is - // valid when the previous oat file has been written. - uint8_t* oat_data_begin_; // Start of oatdata in the corresponding oat file. This is - // valid when the images have been layed out. 
- size_t oat_size_; // Size of the corresponding oat data. + // Offset of the oat file for this image from start of oat files. This is + // valid when the previous oat file has been written. + size_t oat_offset_ = 0; + // Start of oatdata in the corresponding oat file. This is + // valid when the images have been layed out. + uint8_t* oat_data_begin_ = nullptr; + size_t oat_size_ = 0; // Size of the corresponding oat data. // Image bitmap which lets us know where the objects inside of the image reside. std::unique_ptr<gc::accounting::ContinuousSpaceBitmap> image_bitmap_; @@ -280,12 +255,24 @@ class ImageWriter FINAL { SafeMap<const DexFile*, size_t> dex_cache_array_starts_; // Offset from oat_data_begin_ to the stubs. - uint32_t oat_address_offsets_[kOatAddressCount]; + uint32_t oat_address_offsets_[kOatAddressCount] = {}; // Bin slot tracking for dirty object packing. - size_t bin_slot_sizes_[kBinSize]; // Number of bytes in a bin. - size_t bin_slot_offsets_[kBinSize]; // Number of bytes in previous bins. - size_t bin_slot_count_[kBinSize]; // Number of objects in a bin. + size_t bin_slot_sizes_[kBinSize] = {}; // Number of bytes in a bin. + size_t bin_slot_offsets_[kBinSize] = {}; // Number of bytes in previous bins. + size_t bin_slot_count_[kBinSize] = {}; // Number of objects in a bin. + + // Cached size of the intern table for when we allocate memory. + size_t intern_table_bytes_ = 0; + + // Number of image class table bytes. + size_t class_table_bytes_ = 0; + + // Intern table associated with this image for serialization. + std::unique_ptr<InternTable> intern_table_; + + // Class table associated with this image for serialization. + std::unique_ptr<ClassTable> class_table_; }; // We use the lock word to store the offset of the object in the image. @@ -483,18 +470,12 @@ class ImageWriter FINAL { const bool compile_pic_; const bool compile_app_image_; - // Cache the boot image space in this class for faster lookups. - gc::space::ImageSpace* boot_image_space_; - // Size of pointers on the target architecture. size_t target_ptr_size_; // Mapping of oat filename to image data. std::unordered_map<std::string, ImageInfo> image_info_map_; - // Cached size of the intern table for when we allocate memory. - size_t intern_table_bytes_; - // ArtField, ArtMethod relocating map. These are allocated as array of structs but we want to // have one entry per art field for convenience. ArtFields are placed right after the end of the // image objects (aka sum of bin_slot_sizes_). ArtMethods are placed right after the ArtFields. @@ -528,9 +509,6 @@ class ImageWriter FINAL { // null is a valid entry. std::unordered_set<mirror::ClassLoader*> class_loaders_; - // Number of image class table bytes. - size_t class_table_bytes_; - // Which mode the image is stored as, see image.h const ImageHeader::StorageMode image_storage_mode_; diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc index b323d24038..85216b7610 100644 --- a/compiler/jit/jit_compiler.cc +++ b/compiler/jit/jit_compiler.cc @@ -155,8 +155,8 @@ JitCompiler::JitCompiler() : total_time_(0) { /* dump_cfg_append */ false, cumulative_logger_.get(), /* swap_fd */ -1, - /* profile_file */ "", - /* dex to oat map */ nullptr)); + /* dex to oat map */ nullptr, + /* profile_compilation_info */ nullptr)); // Disable dedupe so we can remove compiled methods. 
compiler_driver_->SetDedupeEnabled(false); compiler_driver_->SetSupportBootImageFixup(false); diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h index 877a674674..b10cc3534c 100644 --- a/compiler/linker/relative_patcher_test.h +++ b/compiler/linker/relative_patcher_test.h @@ -47,7 +47,7 @@ class RelativePatcherTest : public testing::Test { driver_(&compiler_options_, &verification_results_, &inliner_map_, Compiler::kQuick, instruction_set, nullptr, false, nullptr, nullptr, nullptr, 1u, - false, false, "", false, nullptr, -1, "", nullptr), + false, false, "", false, nullptr, -1, nullptr, nullptr), error_msg_(), instruction_set_(instruction_set), features_(InstructionSetFeatures::FromVariant(instruction_set, variant, &error_msg_)), diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index 58f46d69a2..9f7ffa5ace 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -121,7 +121,7 @@ class OatTest : public CommonCompilerTest { false, timer_.get(), -1, - "", + nullptr, nullptr)); } diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc index 4dd0d26b89..1af684683b 100644 --- a/compiler/optimizing/builder.cc +++ b/compiler/optimizing/builder.cc @@ -1817,7 +1817,12 @@ void HGraphBuilder::BuildTypeCheck(const Instruction& instruction, UpdateLocal(destination, current_block_->GetLastInstruction(), dex_pc); } else { DCHECK_EQ(instruction.Opcode(), Instruction::CHECK_CAST); + // We emit a CheckCast followed by a BoundType. CheckCast is a statement + // which may throw. If it succeeds BoundType sets the new type of `object` + // for all subsequent uses. current_block_->AddInstruction(new (arena_) HCheckCast(object, cls, check_kind, dex_pc)); + current_block_->AddInstruction(new (arena_) HBoundType(object, dex_pc)); + UpdateLocal(reference, current_block_->GetLastInstruction(), dex_pc); } } diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 35c2c43fca..d1a0b10485 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -417,6 +417,56 @@ class ArraySetSlowPathARM : public SlowPathCode { DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM); }; +// Slow path marking an object during a read barrier. 
+class ReadBarrierMarkSlowPathARM : public SlowPathCode { + public: + ReadBarrierMarkSlowPathARM(HInstruction* instruction, Location out, Location obj) + : instruction_(instruction), out_(out), obj_(obj) { + DCHECK(kEmitCompilerReadBarrier); + } + + const char* GetDescription() const OVERRIDE { return "ReadBarrierMarkSlowPathARM"; } + + void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { + LocationSummary* locations = instruction_->GetLocations(); + Register reg_out = out_.AsRegister<Register>(); + DCHECK(locations->CanCall()); + DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out)); + DCHECK(instruction_->IsInstanceFieldGet() || + instruction_->IsStaticFieldGet() || + instruction_->IsArrayGet() || + instruction_->IsLoadClass() || + instruction_->IsLoadString() || + instruction_->IsInstanceOf() || + instruction_->IsCheckCast()) + << "Unexpected instruction in read barrier marking slow path: " + << instruction_->DebugName(); + + __ Bind(GetEntryLabel()); + SaveLiveRegisters(codegen, locations); + + InvokeRuntimeCallingConvention calling_convention; + CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen); + arm_codegen->Move32(Location::RegisterLocation(calling_convention.GetRegisterAt(0)), obj_); + arm_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pReadBarrierMark), + instruction_, + instruction_->GetDexPc(), + this); + CheckEntrypointTypes<kQuickReadBarrierMark, mirror::Object*, mirror::Object*>(); + arm_codegen->Move32(out_, Location::RegisterLocation(R0)); + + RestoreLiveRegisters(codegen, locations); + __ b(GetExitLabel()); + } + + private: + HInstruction* const instruction_; + const Location out_; + const Location obj_; + + DISALLOW_COPY_AND_ASSIGN(ReadBarrierMarkSlowPathARM); +}; + // Slow path generating a read barrier for a heap reference. class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode { public: @@ -438,7 +488,7 @@ class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode { // to be instrumented, e.g.: // // __ LoadFromOffset(kLoadWord, out, out, offset); - // codegen_->GenerateReadBarrier(instruction, out_loc, out_loc, out_loc, offset); + // codegen_->GenerateReadBarrierSlow(instruction, out_loc, out_loc, out_loc, offset); // // In that case, we have lost the information about the original // object, and the emitted read barrier cannot work properly. 
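For readers new to the slow path introduced above: `ReadBarrierMarkSlowPathARM` saves live registers, moves the just-loaded reference into the first argument register, calls the `pReadBarrierMark` quick entrypoint (whose signature, per the `CheckEntrypointTypes` line, takes a `mirror::Object*` and returns a `mirror::Object*`), and copies the result out of `R0` into the output location. A hedged C++ sketch of that entrypoint contract; the forwarding-pointer body is an illustrative assumption, not ART's actual marking logic:

```cpp
#include <cstdio>

struct Object {
  Object* forwarding_address;  // Non-null once the collector has moved the object.
};

// Stand-in for the pReadBarrierMark entrypoint the slow path invokes: given
// the reference the fast path loaded from the heap, return the reference the
// mutator should actually use from now on.
Object* ReadBarrierMark(Object* ref) {
  return ref->forwarding_address != nullptr ? ref->forwarding_address : ref;
}

int main() {
  Object to_space{nullptr};
  Object from_space{&to_space};
  // The fast path loaded `&from_space`; the slow path rewrites it to the
  // to-space reference, which is why the generated code moves the
  // entrypoint's return value back over the value it just loaded.
  Object* ref = &from_space;
  ref = ReadBarrierMark(ref);
  std::printf("updated to to-space copy: %d\n", ref == &to_space);
  return 0;
}
```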
@@ -454,7 +504,9 @@ class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode { DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out)); DCHECK(!instruction_->IsInvoke() || (instruction_->IsInvokeStaticOrDirect() && - instruction_->GetLocations()->Intrinsified())); + instruction_->GetLocations()->Intrinsified())) + << "Unexpected instruction in read barrier for heap reference slow path: " + << instruction_->DebugName(); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); @@ -596,14 +648,18 @@ class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode { class ReadBarrierForRootSlowPathARM : public SlowPathCode { public: ReadBarrierForRootSlowPathARM(HInstruction* instruction, Location out, Location root) - : instruction_(instruction), out_(out), root_(root) {} + : instruction_(instruction), out_(out), root_(root) { + DCHECK(kEmitCompilerReadBarrier); + } void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); Register reg_out = out_.AsRegister<Register>(); DCHECK(locations->CanCall()); DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(reg_out)); - DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString()); + DCHECK(instruction_->IsLoadClass() || instruction_->IsLoadString()) + << "Unexpected instruction in read barrier for GC root slow path: " + << instruction_->DebugName(); __ Bind(GetEntryLabel()); SaveLiveRegisters(codegen, locations); @@ -1891,7 +1947,7 @@ void LocationsBuilderARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) { } void InstructionCodeGeneratorARM::VisitMemoryBarrier(HMemoryBarrier* memory_barrier) { - GenerateMemoryBarrier(memory_barrier->GetBarrierKind()); + codegen_->GenerateMemoryBarrier(memory_barrier->GetBarrierKind()); } void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) { @@ -3436,7 +3492,7 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) { Register first_reg = first.AsRegister<Register>(); if (second.IsRegister()) { Register second_reg = second.AsRegister<Register>(); - // Arm doesn't mask the shift count so we need to do it ourselves. + // ARM doesn't mask the shift count so we need to do it ourselves. __ and_(out_reg, second_reg, ShifterOperand(kMaxIntShiftValue)); if (op->IsShl()) { __ Lsl(out_reg, first_reg, out_reg); @@ -3448,7 +3504,7 @@ void InstructionCodeGeneratorARM::HandleShift(HBinaryOperation* op) { } else { int32_t cst = second.GetConstant()->AsIntConstant()->GetValue(); uint32_t shift_value = static_cast<uint32_t>(cst & kMaxIntShiftValue); - if (shift_value == 0) { // arm does not support shifting with 0 immediate. + if (shift_value == 0) { // ARM does not support shifting with 0 immediate. __ Mov(out_reg, first_reg); } else if (op->IsShl()) { __ Lsl(out_reg, first_reg, shift_value); @@ -3795,9 +3851,9 @@ void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction ATTRIBUTE_UNUSED) { LOG(FATAL) << "Unreachable"; } -void InstructionCodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) { - // TODO (ported from quick): revisit Arm barrier kinds - DmbOptions flavor = DmbOptions::ISH; // quiet c++ warnings +void CodeGeneratorARM::GenerateMemoryBarrier(MemBarrierKind kind) { + // TODO (ported from quick): revisit ARM barrier kinds. + DmbOptions flavor = DmbOptions::ISH; // Quiet C++ warnings. 
switch (kind) { case MemBarrierKind::kAnyStore: case MemBarrierKind::kLoadAny: @@ -3878,11 +3934,11 @@ void LocationsBuilderARM::HandleFieldSet(HInstruction* instruction, const FieldI locations->AddTemp(Location::RequiresRegister()); // Possibly used for reference poisoning too. locations->AddTemp(Location::RequiresRegister()); } else if (generate_volatile) { - // Arm encoding have some additional constraints for ldrexd/strexd: + // ARM encoding have some additional constraints for ldrexd/strexd: // - registers need to be consecutive // - the first register should be even but not R14. - // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever - // enable Arm encoding. + // We don't test for ARM yet, and the assertion makes sure that we + // revisit this if we ever enable ARM encoding. DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet()); locations->AddTemp(Location::RequiresRegister()); @@ -3912,7 +3968,7 @@ void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction, CodeGenerator::StoreNeedsWriteBarrier(field_type, instruction->InputAt(1)); if (is_volatile) { - GenerateMemoryBarrier(MemBarrierKind::kAnyStore); + codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyStore); } switch (field_type) { @@ -4004,7 +4060,7 @@ void InstructionCodeGeneratorARM::HandleFieldSet(HInstruction* instruction, } if (is_volatile) { - GenerateMemoryBarrier(MemBarrierKind::kAnyAny); + codegen_->GenerateMemoryBarrier(MemBarrierKind::kAnyAny); } } @@ -4038,14 +4094,18 @@ void LocationsBuilderARM::HandleFieldGet(HInstruction* instruction, const FieldI (overlap ? Location::kOutputOverlap : Location::kNoOutputOverlap)); } if (volatile_for_double) { - // Arm encoding have some additional constraints for ldrexd/strexd: + // ARM encoding have some additional constraints for ldrexd/strexd: // - registers need to be consecutive // - the first register should be even but not R14. - // We don't test for Arm yet, and the assertion makes sure that we revisit this if we ever - // enable Arm encoding. + // We don't test for ARM yet, and the assertion makes sure that we + // revisit this if we ever enable ARM encoding. DCHECK_EQ(InstructionSet::kThumb2, codegen_->GetInstructionSet()); locations->AddTemp(Location::RequiresRegister()); locations->AddTemp(Location::RequiresRegister()); + } else if (object_field_get_with_read_barrier && kUseBakerReadBarrier) { + // We need a temporary register for the read barrier marking slow + // path in CodeGeneratorARM::GenerateFieldLoadWithBakerReadBarrier. 
+ locations->AddTemp(Location::RequiresRegister()); } } @@ -4104,33 +4164,52 @@ void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction, uint32_t offset = field_info.GetFieldOffset().Uint32Value(); switch (field_type) { - case Primitive::kPrimBoolean: { + case Primitive::kPrimBoolean: __ LoadFromOffset(kLoadUnsignedByte, out.AsRegister<Register>(), base, offset); break; - } - case Primitive::kPrimByte: { + case Primitive::kPrimByte: __ LoadFromOffset(kLoadSignedByte, out.AsRegister<Register>(), base, offset); break; - } - case Primitive::kPrimShort: { + case Primitive::kPrimShort: __ LoadFromOffset(kLoadSignedHalfword, out.AsRegister<Register>(), base, offset); break; - } - case Primitive::kPrimChar: { + case Primitive::kPrimChar: __ LoadFromOffset(kLoadUnsignedHalfword, out.AsRegister<Register>(), base, offset); break; - } case Primitive::kPrimInt: - case Primitive::kPrimNot: { __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset); break; + + case Primitive::kPrimNot: { + // /* HeapReference<Object> */ out = *(base + offset) + if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + Location temp_loc = locations->GetTemp(0); + // Note that a potential implicit null check is handled in this + // CodeGeneratorARM::GenerateFieldLoadWithBakerReadBarrier call. + codegen_->GenerateFieldLoadWithBakerReadBarrier( + instruction, out, base, offset, temp_loc, /* needs_null_check */ true); + if (is_volatile) { + codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny); + } + } else { + __ LoadFromOffset(kLoadWord, out.AsRegister<Register>(), base, offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + if (is_volatile) { + codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny); + } + // If read barriers are enabled, emit read barriers other than + // Baker's using a slow path (and also unpoison the loaded + // reference, if heap poisoning is enabled). + codegen_->MaybeGenerateReadBarrierSlow(instruction, out, out, base_loc, offset); + } + break; } - case Primitive::kPrimLong: { + case Primitive::kPrimLong: if (is_volatile && !atomic_ldrd_strd) { GenerateWideAtomicLoad(base, offset, out.AsRegisterPairLow<Register>(), @@ -4139,12 +4218,10 @@ void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction, __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), base, offset); } break; - } - case Primitive::kPrimFloat: { + case Primitive::kPrimFloat: __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), base, offset); break; - } case Primitive::kPrimDouble: { DRegister out_reg = FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()); @@ -4166,17 +4243,20 @@ void InstructionCodeGeneratorARM::HandleFieldGet(HInstruction* instruction, UNREACHABLE(); } - // Doubles are handled in the switch. - if (field_type != Primitive::kPrimDouble) { + if (field_type == Primitive::kPrimNot || field_type == Primitive::kPrimDouble) { + // Potential implicit null checks, in the case of reference or + // double fields, are handled in the previous switch statement. + } else { codegen_->MaybeRecordImplicitNullCheck(instruction); } if (is_volatile) { - GenerateMemoryBarrier(MemBarrierKind::kLoadAny); - } - - if (field_type == Primitive::kPrimNot) { - codegen_->MaybeGenerateReadBarrier(instruction, out, out, base_loc, offset); + if (field_type == Primitive::kPrimNot) { + // Memory barriers, in the case of references, are also handled + // in the previous switch statement. 
+ } else { + codegen_->GenerateMemoryBarrier(MemBarrierKind::kLoadAny); + } } } @@ -4339,6 +4419,11 @@ void LocationsBuilderARM::VisitArrayGet(HArrayGet* instruction) { Location::RequiresRegister(), object_array_get_with_read_barrier ? Location::kOutputOverlap : Location::kNoOutputOverlap); } + // We need a temporary register for the read barrier marking slow + // path in CodeGeneratorARM::GenerateArrayLoadWithBakerReadBarrier. + if (object_array_get_with_read_barrier && kUseBakerReadBarrier) { + locations->AddTemp(Location::RequiresRegister()); + } } void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) { @@ -4346,12 +4431,13 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) { Location obj_loc = locations->InAt(0); Register obj = obj_loc.AsRegister<Register>(); Location index = locations->InAt(1); - Primitive::Type type = instruction->GetType(); + Location out_loc = locations->Out(); + Primitive::Type type = instruction->GetType(); switch (type) { case Primitive::kPrimBoolean: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint8_t)).Uint32Value(); - Register out = locations->Out().AsRegister<Register>(); + Register out = out_loc.AsRegister<Register>(); if (index.IsConstant()) { size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset; @@ -4365,7 +4451,7 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) { case Primitive::kPrimByte: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int8_t)).Uint32Value(); - Register out = locations->Out().AsRegister<Register>(); + Register out = out_loc.AsRegister<Register>(); if (index.IsConstant()) { size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_1) + data_offset; @@ -4379,7 +4465,7 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) { case Primitive::kPrimShort: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int16_t)).Uint32Value(); - Register out = locations->Out().AsRegister<Register>(); + Register out = out_loc.AsRegister<Register>(); if (index.IsConstant()) { size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset; @@ -4393,7 +4479,7 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) { case Primitive::kPrimChar: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(uint16_t)).Uint32Value(); - Register out = locations->Out().AsRegister<Register>(); + Register out = out_loc.AsRegister<Register>(); if (index.IsConstant()) { size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_2) + data_offset; @@ -4405,13 +4491,9 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) { break; } - case Primitive::kPrimInt: - case Primitive::kPrimNot: { - static_assert( - sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t), - "art::mirror::HeapReference<mirror::Object> and int32_t have different sizes."); + case Primitive::kPrimInt: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); - Register out = locations->Out().AsRegister<Register>(); + Register out = out_loc.AsRegister<Register>(); if (index.IsConstant()) { size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; @@ -4423,44 +4505,79 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) { break; } + case Primitive::kPrimNot: { + static_assert( + sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t), + 
"art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); + uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); + // /* HeapReference<Object> */ out = + // *(obj + data_offset + index * sizeof(HeapReference<Object>)) + if (kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + Location temp = locations->GetTemp(0); + // Note that a potential implicit null check is handled in this + // CodeGeneratorARM::GenerateArrayLoadWithBakerReadBarrier call. + codegen_->GenerateArrayLoadWithBakerReadBarrier( + instruction, out_loc, obj, data_offset, index, temp, /* needs_null_check */ true); + } else { + Register out = out_loc.AsRegister<Register>(); + if (index.IsConstant()) { + size_t offset = + (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; + __ LoadFromOffset(kLoadWord, out, obj, offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + // If read barriers are enabled, emit read barriers other than + // Baker's using a slow path (and also unpoison the loaded + // reference, if heap poisoning is enabled). + codegen_->MaybeGenerateReadBarrierSlow(instruction, out_loc, out_loc, obj_loc, offset); + } else { + __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4)); + __ LoadFromOffset(kLoadWord, out, IP, data_offset); + codegen_->MaybeRecordImplicitNullCheck(instruction); + // If read barriers are enabled, emit read barriers other than + // Baker's using a slow path (and also unpoison the loaded + // reference, if heap poisoning is enabled). + codegen_->MaybeGenerateReadBarrierSlow( + instruction, out_loc, out_loc, obj_loc, data_offset, index); + } + } + break; + } + case Primitive::kPrimLong: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(int64_t)).Uint32Value(); - Location out = locations->Out(); if (index.IsConstant()) { size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset; - __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), obj, offset); + __ LoadFromOffset(kLoadWordPair, out_loc.AsRegisterPairLow<Register>(), obj, offset); } else { __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8)); - __ LoadFromOffset(kLoadWordPair, out.AsRegisterPairLow<Register>(), IP, data_offset); + __ LoadFromOffset(kLoadWordPair, out_loc.AsRegisterPairLow<Register>(), IP, data_offset); } break; } case Primitive::kPrimFloat: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(float)).Uint32Value(); - Location out = locations->Out(); - DCHECK(out.IsFpuRegister()); + SRegister out = out_loc.AsFpuRegister<SRegister>(); if (index.IsConstant()) { size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; - __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), obj, offset); + __ LoadSFromOffset(out, obj, offset); } else { __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4)); - __ LoadSFromOffset(out.AsFpuRegister<SRegister>(), IP, data_offset); + __ LoadSFromOffset(out, IP, data_offset); } break; } case Primitive::kPrimDouble: { uint32_t data_offset = mirror::Array::DataOffset(sizeof(double)).Uint32Value(); - Location out = locations->Out(); - DCHECK(out.IsFpuRegisterPair()); + SRegister out = out_loc.AsFpuRegisterPairLow<SRegister>(); if (index.IsConstant()) { size_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_8) + data_offset; - __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), obj, offset); + __ 
LoadDFromOffset(FromLowSToD(out), obj, offset); } else { __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_8)); - __ LoadDFromOffset(FromLowSToD(out.AsFpuRegisterPairLow<SRegister>()), IP, data_offset); + __ LoadDFromOffset(FromLowSToD(out), IP, data_offset); } break; } @@ -4469,20 +4586,12 @@ void InstructionCodeGeneratorARM::VisitArrayGet(HArrayGet* instruction) { LOG(FATAL) << "Unreachable type " << type; UNREACHABLE(); } - codegen_->MaybeRecordImplicitNullCheck(instruction); if (type == Primitive::kPrimNot) { - static_assert( - sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t), - "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); - uint32_t data_offset = mirror::Array::DataOffset(sizeof(int32_t)).Uint32Value(); - Location out = locations->Out(); - if (index.IsConstant()) { - uint32_t offset = (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + data_offset; - codegen_->MaybeGenerateReadBarrier(instruction, out, out, obj_loc, offset); - } else { - codegen_->MaybeGenerateReadBarrier(instruction, out, out, obj_loc, data_offset, index); - } + // Potential implicit null checks, in the case of reference + // arrays, are handled in the previous switch statement. + } else { + codegen_->MaybeRecordImplicitNullCheck(instruction); } } @@ -4573,6 +4682,7 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) { __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4)); __ StoreToOffset(kStoreWord, source, IP, data_offset); } + codegen_->MaybeRecordImplicitNullCheck(instruction); DCHECK(!needs_write_barrier); DCHECK(!may_need_runtime_call_for_type_check); break; @@ -4614,12 +4724,12 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) { // __ Mov(temp2, temp1); // // /* HeapReference<Class> */ temp1 = temp1->component_type_ // __ LoadFromOffset(kLoadWord, temp1, temp1, component_offset); - // codegen_->GenerateReadBarrier( + // codegen_->GenerateReadBarrierSlow( // instruction, temp1_loc, temp1_loc, temp2_loc, component_offset); // // // /* HeapReference<Class> */ temp2 = value->klass_ // __ LoadFromOffset(kLoadWord, temp2, value, class_offset); - // codegen_->GenerateReadBarrier( + // codegen_->GenerateReadBarrierSlow( // instruction, temp2_loc, temp2_loc, value_loc, class_offset, temp1_loc); // // __ cmp(temp1, ShifterOperand(temp2)); @@ -4716,8 +4826,6 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) { __ add(IP, array, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4)); __ StoreToOffset(kStoreWord, value, IP, data_offset); } - - codegen_->MaybeRecordImplicitNullCheck(instruction); break; } @@ -4769,8 +4877,8 @@ void InstructionCodeGeneratorARM::VisitArraySet(HArraySet* instruction) { UNREACHABLE(); } - // Ints and objects are handled in the switch. - if (value_type != Primitive::kPrimInt && value_type != Primitive::kPrimNot) { + // Objects are handled in the switch. 
+ if (value_type != Primitive::kPrimNot) { codegen_->MaybeRecordImplicitNullCheck(instruction); } } @@ -5139,16 +5247,9 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) { if (cls->IsReferrersClass()) { DCHECK(!cls->CanCallRuntime()); DCHECK(!cls->MustGenerateClinitCheck()); - uint32_t declaring_class_offset = ArtMethod::DeclaringClassOffset().Int32Value(); - if (kEmitCompilerReadBarrier) { - // /* GcRoot<mirror::Class>* */ out = &(current_method->declaring_class_) - __ AddConstant(out, current_method, declaring_class_offset); - // /* mirror::Class* */ out = out->Read() - codegen_->GenerateReadBarrierForRoot(cls, out_loc, out_loc); - } else { - // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_ - __ LoadFromOffset(kLoadWord, out, current_method, declaring_class_offset); - } + // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_ + GenerateGcRootFieldLoad( + cls, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value()); } else { // /* GcRoot<mirror::Class>[] */ out = // current_method.ptr_sized_fields_->dex_cache_resolved_types_ @@ -5156,17 +5257,8 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) { out, current_method, ArtMethod::DexCacheResolvedTypesOffset(kArmPointerSize).Int32Value()); - - size_t cache_offset = CodeGenerator::GetCacheOffset(cls->GetTypeIndex()); - if (kEmitCompilerReadBarrier) { - // /* GcRoot<mirror::Class>* */ out = &out[type_index] - __ AddConstant(out, out, cache_offset); - // /* mirror::Class* */ out = out->Read() - codegen_->GenerateReadBarrierForRoot(cls, out_loc, out_loc); - } else { - // /* GcRoot<mirror::Class> */ out = out[type_index] - __ LoadFromOffset(kLoadWord, out, out, cache_offset); - } + // /* GcRoot<mirror::Class> */ out = out[type_index] + GenerateGcRootFieldLoad(cls, out_loc, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())); if (!cls->IsInDexCache() || cls->MustGenerateClinitCheck()) { DCHECK(cls->CanCallRuntime()); @@ -5229,30 +5321,14 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) { Register out = out_loc.AsRegister<Register>(); Register current_method = locations->InAt(0).AsRegister<Register>(); - uint32_t declaring_class_offset = ArtMethod::DeclaringClassOffset().Int32Value(); - if (kEmitCompilerReadBarrier) { - // /* GcRoot<mirror::Class>* */ out = &(current_method->declaring_class_) - __ AddConstant(out, current_method, declaring_class_offset); - // /* mirror::Class* */ out = out->Read() - codegen_->GenerateReadBarrierForRoot(load, out_loc, out_loc); - } else { - // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_ - __ LoadFromOffset(kLoadWord, out, current_method, declaring_class_offset); - } - + // /* GcRoot<mirror::Class> */ out = current_method->declaring_class_ + GenerateGcRootFieldLoad( + load, out_loc, current_method, ArtMethod::DeclaringClassOffset().Int32Value()); // /* GcRoot<mirror::String>[] */ out = out->dex_cache_strings_ __ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value()); - - size_t cache_offset = CodeGenerator::GetCacheOffset(load->GetStringIndex()); - if (kEmitCompilerReadBarrier) { - // /* GcRoot<mirror::String>* */ out = &out[string_index] - __ AddConstant(out, out, cache_offset); - // /* mirror::String* */ out = out->Read() - codegen_->GenerateReadBarrierForRoot(load, out_loc, out_loc); - } else { - // /* GcRoot<mirror::String> */ out = out[string_index] - __ LoadFromOffset(kLoadWord, out, out, cache_offset); - } + // /* GcRoot<mirror::String> 
*/ out = out[string_index] + GenerateGcRootFieldLoad( + load, out_loc, out, CodeGenerator::GetCacheOffset(load->GetStringIndex())); if (!load->IsInDexCache()) { SlowPathCode* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load); @@ -5299,6 +5375,14 @@ void InstructionCodeGeneratorARM::VisitThrow(HThrow* instruction) { CheckEntrypointTypes<kQuickDeliverException, void, mirror::Object*>(); } +static bool TypeCheckNeedsATemporary(TypeCheckKind type_check_kind) { + return kEmitCompilerReadBarrier && + (kUseBakerReadBarrier || + type_check_kind == TypeCheckKind::kAbstractClassCheck || + type_check_kind == TypeCheckKind::kClassHierarchyCheck || + type_check_kind == TypeCheckKind::kArrayObjectCheck); +} + void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) { LocationSummary::CallKind call_kind = LocationSummary::kNoCall; TypeCheckKind type_check_kind = instruction->GetTypeCheckKind(); @@ -5325,21 +5409,22 @@ void LocationsBuilderARM::VisitInstanceOf(HInstanceOf* instruction) { locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); // When read barriers are enabled, we need a temporary register for // some cases. - if (kEmitCompilerReadBarrier && - (type_check_kind == TypeCheckKind::kAbstractClassCheck || - type_check_kind == TypeCheckKind::kClassHierarchyCheck || - type_check_kind == TypeCheckKind::kArrayObjectCheck)) { + if (TypeCheckNeedsATemporary(type_check_kind)) { locations->AddTemp(Location::RequiresRegister()); } } void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) { + TypeCheckKind type_check_kind = instruction->GetTypeCheckKind(); LocationSummary* locations = instruction->GetLocations(); Location obj_loc = locations->InAt(0); Register obj = obj_loc.AsRegister<Register>(); Register cls = locations->InAt(1).AsRegister<Register>(); Location out_loc = locations->Out(); Register out = out_loc.AsRegister<Register>(); + Location temp_loc = TypeCheckNeedsATemporary(type_check_kind) ? + locations->GetTemp(0) : + Location::NoLocation(); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); @@ -5354,10 +5439,9 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) { } // /* HeapReference<Class> */ out = obj->klass_ - __ LoadFromOffset(kLoadWord, out, obj, class_offset); - codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, obj_loc, class_offset); + GenerateReferenceLoadTwoRegisters(instruction, out_loc, obj_loc, class_offset, temp_loc); - switch (instruction->GetTypeCheckKind()) { + switch (type_check_kind) { case TypeCheckKind::kExactCheck: { __ cmp(out, ShifterOperand(cls)); // Classes must be equal for the instanceof to succeed. @@ -5372,17 +5456,8 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) { // object to avoid doing a comparison we know will fail. Label loop; __ Bind(&loop); - Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation(); - if (kEmitCompilerReadBarrier) { - // Save the value of `out` into `temp` before overwriting it - // in the following move operation, as we will need it for the - // read barrier below. 
- Register temp = temp_loc.AsRegister<Register>(); - __ Mov(temp, out); - } // /* HeapReference<Class> */ out = out->super_class_ - __ LoadFromOffset(kLoadWord, out, out, super_offset); - codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, super_offset); + GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, temp_loc); // If `out` is null, we use it for the result, and jump to `done`. __ CompareAndBranchIfZero(out, &done); __ cmp(out, ShifterOperand(cls)); @@ -5400,17 +5475,8 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) { __ Bind(&loop); __ cmp(out, ShifterOperand(cls)); __ b(&success, EQ); - Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation(); - if (kEmitCompilerReadBarrier) { - // Save the value of `out` into `temp` before overwriting it - // in the following move operation, as we will need it for the - // read barrier below. - Register temp = temp_loc.AsRegister<Register>(); - __ Mov(temp, out); - } // /* HeapReference<Class> */ out = out->super_class_ - __ LoadFromOffset(kLoadWord, out, out, super_offset); - codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, super_offset); + GenerateReferenceLoadOneRegister(instruction, out_loc, super_offset, temp_loc); __ CompareAndBranchIfNonZero(out, &loop); // If `out` is null, we use it for the result, and jump to `done`. __ b(&done); @@ -5428,17 +5494,8 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) { __ cmp(out, ShifterOperand(cls)); __ b(&exact_check, EQ); // Otherwise, we need to check that the object's class is a non-primitive array. - Location temp_loc = kEmitCompilerReadBarrier ? locations->GetTemp(0) : Location::NoLocation(); - if (kEmitCompilerReadBarrier) { - // Save the value of `out` into `temp` before overwriting it - // in the following move operation, as we will need it for the - // read barrier below. - Register temp = temp_loc.AsRegister<Register>(); - __ Mov(temp, out); - } // /* HeapReference<Class> */ out = out->component_type_ - __ LoadFromOffset(kLoadWord, out, out, component_offset); - codegen_->MaybeGenerateReadBarrier(instruction, out_loc, out_loc, temp_loc, component_offset); + GenerateReferenceLoadOneRegister(instruction, out_loc, component_offset, temp_loc); // If `out` is null, we use it for the result, and jump to `done`. __ CompareAndBranchIfZero(out, &done); __ LoadFromOffset(kLoadUnsignedHalfword, out, out, primitive_offset); @@ -5477,6 +5534,13 @@ void InstructionCodeGeneratorARM::VisitInstanceOf(HInstanceOf* instruction) { // HInstanceOf instruction (following the runtime calling // convention), which might be cluttered by the potential first // read barrier emission at the beginning of this method. + // + // TODO: Introduce a new runtime entry point taking the object + // to test (instead of its class) as argument, and let it deal + // with the read barrier issues. This will let us refactor this + // case of the `switch` code as it was previously (with a direct + // call to the runtime not using a type checking slow path). + // This should also be beneficial for the other cases above. 
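A sketch of the runtime call envisioned by the TODO above (hypothetical; the name, signature, and helpers are illustrative and not part of this change):

  extern "C" uint32_t artInstanceOfFromObject(mirror::Object* obj, mirror::Class* dest)
      SHARED_REQUIRES(Locks::mutator_lock_) {
    // The runtime loads obj->klass_ itself, applying whatever read barrier
    // the collector requires, so compiled code would no longer emit one
    // before the call.
    return (obj != nullptr && obj->InstanceOf(dest)) ? 1u : 0u;
  }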
DCHECK(locations->OnlyCallsOnSlowPath()); slow_path = new (GetGraph()->GetArena()) TypeCheckSlowPathARM(instruction, /* is_fatal */ false); @@ -5531,27 +5595,27 @@ void LocationsBuilderARM::VisitCheckCast(HCheckCast* instruction) { locations->AddTemp(Location::RequiresRegister()); // When read barriers are enabled, we need an additional temporary // register for some cases. - if (kEmitCompilerReadBarrier && - (type_check_kind == TypeCheckKind::kAbstractClassCheck || - type_check_kind == TypeCheckKind::kClassHierarchyCheck || - type_check_kind == TypeCheckKind::kArrayObjectCheck)) { + if (TypeCheckNeedsATemporary(type_check_kind)) { locations->AddTemp(Location::RequiresRegister()); } } void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { + TypeCheckKind type_check_kind = instruction->GetTypeCheckKind(); LocationSummary* locations = instruction->GetLocations(); Location obj_loc = locations->InAt(0); Register obj = obj_loc.AsRegister<Register>(); Register cls = locations->InAt(1).AsRegister<Register>(); Location temp_loc = locations->GetTemp(0); Register temp = temp_loc.AsRegister<Register>(); + Location temp2_loc = TypeCheckNeedsATemporary(type_check_kind) ? + locations->GetTemp(1) : + Location::NoLocation(); uint32_t class_offset = mirror::Object::ClassOffset().Int32Value(); uint32_t super_offset = mirror::Class::SuperClassOffset().Int32Value(); uint32_t component_offset = mirror::Class::ComponentTypeOffset().Int32Value(); uint32_t primitive_offset = mirror::Class::PrimitiveTypeOffset().Int32Value(); - TypeCheckKind type_check_kind = instruction->GetTypeCheckKind(); bool is_type_check_slow_path_fatal = (type_check_kind == TypeCheckKind::kExactCheck || type_check_kind == TypeCheckKind::kAbstractClassCheck || @@ -5570,8 +5634,7 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { } // /* HeapReference<Class> */ temp = obj->klass_ - __ LoadFromOffset(kLoadWord, temp, obj, class_offset); - codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset); + GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc); switch (type_check_kind) { case TypeCheckKind::kExactCheck: @@ -5588,18 +5651,8 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { // object to avoid doing a comparison we know will fail. Label loop, compare_classes; __ Bind(&loop); - Location temp2_loc = - kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation(); - if (kEmitCompilerReadBarrier) { - // Save the value of `temp` into `temp2` before overwriting it - // in the following move operation, as we will need it for the - // read barrier below. - Register temp2 = temp2_loc.AsRegister<Register>(); - __ Mov(temp2, temp); - } // /* HeapReference<Class> */ temp = temp->super_class_ - __ LoadFromOffset(kLoadWord, temp, temp, super_offset); - codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, temp2_loc, super_offset); + GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, temp2_loc); // If the class reference currently in `temp` is not null, jump // to the `compare_classes` label to compare it with the checked @@ -5611,8 +5664,7 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { // going into the slow path, as it has been overwritten in the // meantime. 
// /* HeapReference<Class> */ temp = obj->klass_ - __ LoadFromOffset(kLoadWord, temp, obj, class_offset); - codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset); + GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc); __ b(type_check_slow_path->GetEntryLabel()); __ Bind(&compare_classes); @@ -5628,18 +5680,8 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { __ cmp(temp, ShifterOperand(cls)); __ b(&done, EQ); - Location temp2_loc = - kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation(); - if (kEmitCompilerReadBarrier) { - // Save the value of `temp` into `temp2` before overwriting it - // in the following move operation, as we will need it for the - // read barrier below. - Register temp2 = temp2_loc.AsRegister<Register>(); - __ Mov(temp2, temp); - } // /* HeapReference<Class> */ temp = temp->super_class_ - __ LoadFromOffset(kLoadWord, temp, temp, super_offset); - codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, temp2_loc, super_offset); + GenerateReferenceLoadOneRegister(instruction, temp_loc, super_offset, temp2_loc); // If the class reference currently in `temp` is not null, jump // back at the beginning of the loop. @@ -5650,8 +5692,7 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { // going into the slow path, as it has been overwritten in the // meantime. // /* HeapReference<Class> */ temp = obj->klass_ - __ LoadFromOffset(kLoadWord, temp, obj, class_offset); - codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset); + GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc); __ b(type_check_slow_path->GetEntryLabel()); break; } @@ -5663,19 +5704,8 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { __ b(&done, EQ); // Otherwise, we need to check that the object's class is a non-primitive array. - Location temp2_loc = - kEmitCompilerReadBarrier ? locations->GetTemp(1) : Location::NoLocation(); - if (kEmitCompilerReadBarrier) { - // Save the value of `temp` into `temp2` before overwriting it - // in the following move operation, as we will need it for the - // read barrier below. - Register temp2 = temp2_loc.AsRegister<Register>(); - __ Mov(temp2, temp); - } // /* HeapReference<Class> */ temp = temp->component_type_ - __ LoadFromOffset(kLoadWord, temp, temp, component_offset); - codegen_->MaybeGenerateReadBarrier( - instruction, temp_loc, temp_loc, temp2_loc, component_offset); + GenerateReferenceLoadOneRegister(instruction, temp_loc, component_offset, temp2_loc); // If the component type is not null (i.e. the object is indeed // an array), jump to label `check_non_primitive_component_type` @@ -5688,8 +5718,7 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { // going into the slow path, as it has been overwritten in the // meantime. 
// /* HeapReference<Class> */ temp = obj->klass_ - __ LoadFromOffset(kLoadWord, temp, obj, class_offset); - codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset); + GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc); __ b(type_check_slow_path->GetEntryLabel()); __ Bind(&check_non_primitive_component_type); @@ -5698,8 +5727,7 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { __ CompareAndBranchIfZero(temp, &done); // Same comment as above regarding `temp` and the slow path. // /* HeapReference<Class> */ temp = obj->klass_ - __ LoadFromOffset(kLoadWord, temp, obj, class_offset); - codegen_->MaybeGenerateReadBarrier(instruction, temp_loc, temp_loc, obj_loc, class_offset); + GenerateReferenceLoadTwoRegisters(instruction, temp_loc, obj_loc, class_offset, temp2_loc); __ b(type_check_slow_path->GetEntryLabel()); break; } @@ -5716,6 +5744,13 @@ void InstructionCodeGeneratorARM::VisitCheckCast(HCheckCast* instruction) { // instruction (following the runtime calling convention), which // might be cluttered by the potential first read barrier // emission at the beginning of this method. + // + // TODO: Introduce a new runtime entry point taking the object + // to test (instead of its class) as argument, and let it deal + // with the read barrier issues. This will let us refactor this + // case of the `switch` code as it was previously (with a direct + // call to the runtime not using a type checking slow path). + // This should also be beneficial for the other cases above. __ b(type_check_slow_path->GetEntryLabel()); break; } @@ -5900,14 +5935,249 @@ void InstructionCodeGeneratorARM::HandleBitwiseOperation(HBinaryOperation* instr } } -void CodeGeneratorARM::GenerateReadBarrier(HInstruction* instruction, - Location out, - Location ref, - Location obj, - uint32_t offset, - Location index) { +void InstructionCodeGeneratorARM::GenerateReferenceLoadOneRegister(HInstruction* instruction, + Location out, + uint32_t offset, + Location temp) { + Register out_reg = out.AsRegister<Register>(); + if (kEmitCompilerReadBarrier) { + if (kUseBakerReadBarrier) { + // Load with fast path based Baker's read barrier. + // /* HeapReference<Object> */ out = *(out + offset) + codegen_->GenerateFieldLoadWithBakerReadBarrier( + instruction, out, out_reg, offset, temp, /* needs_null_check */ false); + } else { + // Load with slow path based read barrier. + // Save the value of `out` into `temp` before overwriting it + // in the following move operation, as we will need it for the + // read barrier below. + __ Mov(temp.AsRegister<Register>(), out_reg); + // /* HeapReference<Object> */ out = *(out + offset) + __ LoadFromOffset(kLoadWord, out_reg, out_reg, offset); + codegen_->GenerateReadBarrierSlow(instruction, out, out, temp, offset); + } + } else { + // Plain load with no read barrier. + // /* HeapReference<Object> */ out = *(out + offset) + __ LoadFromOffset(kLoadWord, out_reg, out_reg, offset); + __ MaybeUnpoisonHeapReference(out_reg); + } +} + +void InstructionCodeGeneratorARM::GenerateReferenceLoadTwoRegisters(HInstruction* instruction, + Location out, + Location obj, + uint32_t offset, + Location temp) { + Register out_reg = out.AsRegister<Register>(); + Register obj_reg = obj.AsRegister<Register>(); + if (kEmitCompilerReadBarrier) { + if (kUseBakerReadBarrier) { + // Load with fast path based Baker's read barrier. 
+ // /* HeapReference<Object> */ out = *(obj + offset) + codegen_->GenerateFieldLoadWithBakerReadBarrier( + instruction, out, obj_reg, offset, temp, /* needs_null_check */ false); + } else { + // Load with slow path based read barrier. + // /* HeapReference<Object> */ out = *(obj + offset) + __ LoadFromOffset(kLoadWord, out_reg, obj_reg, offset); + codegen_->GenerateReadBarrierSlow(instruction, out, out, obj, offset); + } + } else { + // Plain load with no read barrier. + // /* HeapReference<Object> */ out = *(obj + offset) + __ LoadFromOffset(kLoadWord, out_reg, obj_reg, offset); + __ MaybeUnpoisonHeapReference(out_reg); + } +} + +void InstructionCodeGeneratorARM::GenerateGcRootFieldLoad(HInstruction* instruction, + Location root, + Register obj, + uint32_t offset) { + Register root_reg = root.AsRegister<Register>(); + if (kEmitCompilerReadBarrier) { + if (kUseBakerReadBarrier) { + // Fast path implementation of art::ReadBarrier::BarrierForRoot when + // Baker's read barriers are used: + // + // root = obj.field; + // if (Thread::Current()->GetIsGcMarking()) { + // root = ReadBarrier::Mark(root) + // } + + // /* GcRoot<mirror::Object> */ root = *(obj + offset) + __ LoadFromOffset(kLoadWord, root_reg, obj, offset); + static_assert( + sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(GcRoot<mirror::Object>), + "art::mirror::CompressedReference<mirror::Object> and art::GcRoot<mirror::Object> " + "have different sizes."); + static_assert(sizeof(mirror::CompressedReference<mirror::Object>) == sizeof(int32_t), + "art::mirror::CompressedReference<mirror::Object> and int32_t " + "have different sizes."); + + // Slow path used to mark the GC root `root`. + SlowPathCode* slow_path = + new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM(instruction, root, root); + codegen_->AddSlowPath(slow_path); + + __ LoadFromOffset( + kLoadWord, IP, TR, Thread::IsGcMarkingOffset<kArmWordSize>().Int32Value()); + __ CompareAndBranchIfNonZero(IP, slow_path->GetEntryLabel()); + __ Bind(slow_path->GetExitLabel()); + } else { + // GC root loaded through a slow path for read barriers other + // than Baker's. + // /* GcRoot<mirror::Object>* */ root = obj + offset + __ AddConstant(root_reg, obj, offset); + // /* mirror::Object* */ root = root->Read() + codegen_->GenerateReadBarrierForRootSlow(instruction, root, root); + } + } else { + // Plain GC root load with no read barrier. + // /* GcRoot<mirror::Object> */ root = *(obj + offset) + __ LoadFromOffset(kLoadWord, root_reg, obj, offset); + // Note that GC roots are not affected by heap poisoning, thus we + // do not have to unpoison `root_reg` here.
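(GC roots such as those held in ArtMethod and the dex cache live outside the managed heap, which is why the poisoning scheme for heap references does not apply to them.)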
+ } +} + +void CodeGeneratorARM::GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction, + Location ref, + Register obj, + uint32_t offset, + Location temp, + bool needs_null_check) { + DCHECK(kEmitCompilerReadBarrier); + DCHECK(kUseBakerReadBarrier); + + // /* HeapReference<Object> */ ref = *(obj + offset) + Location no_index = Location::NoLocation(); + GenerateReferenceLoadWithBakerReadBarrier( + instruction, ref, obj, offset, no_index, temp, needs_null_check); +} + +void CodeGeneratorARM::GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction, + Location ref, + Register obj, + uint32_t data_offset, + Location index, + Location temp, + bool needs_null_check) { DCHECK(kEmitCompilerReadBarrier); + DCHECK(kUseBakerReadBarrier); + // /* HeapReference<Object> */ ref = + // *(obj + data_offset + index * sizeof(HeapReference<Object>)) + GenerateReferenceLoadWithBakerReadBarrier( + instruction, ref, obj, data_offset, index, temp, needs_null_check); +} + +void CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction, + Location ref, + Register obj, + uint32_t offset, + Location index, + Location temp, + bool needs_null_check) { + DCHECK(kEmitCompilerReadBarrier); + DCHECK(kUseBakerReadBarrier); + + // In slow path based read barriers, the read barrier call is + // inserted after the original load. However, in fast path based + // Baker's read barriers, we need to perform the load of + // mirror::Object::monitor_ *before* the original reference load. + // This load-load ordering is required by the read barrier. + // The fast path/slow path (for Baker's algorithm) should look like: + // + // uint32_t rb_state = Lockword(obj->monitor_).ReadBarrierState(); + // lfence; // Load fence or artificial data dependency to prevent load-load reordering + // HeapReference<Object> ref = *src; // Original reference load. + // bool is_gray = (rb_state == ReadBarrier::gray_ptr_); + // if (is_gray) { + // ref = ReadBarrier::Mark(ref); // Performed by runtime entrypoint slow path. + // } + // + // Note: the original implementation in ReadBarrier::Barrier is + // slightly more complex as: + // - it implements the load-load fence using a data dependency on + // the high-bits of rb_state, which are expected to be all zeroes; + // - it performs additional checks that we do not do here for + // performance reasons. + + Register ref_reg = ref.AsRegister<Register>(); + Register temp_reg = temp.AsRegister<Register>(); + uint32_t monitor_offset = mirror::Object::MonitorOffset().Int32Value(); + + // /* int32_t */ monitor = obj->monitor_ + __ LoadFromOffset(kLoadWord, temp_reg, obj, monitor_offset); + if (needs_null_check) { + MaybeRecordImplicitNullCheck(instruction); + } + // /* LockWord */ lock_word = LockWord(monitor) + static_assert(sizeof(LockWord) == sizeof(int32_t), + "art::LockWord and int32_t have different sizes."); + // /* uint32_t */ rb_state = lock_word.ReadBarrierState() + __ Lsr(temp_reg, temp_reg, LockWord::kReadBarrierStateShift); + __ and_(temp_reg, temp_reg, ShifterOperand(LockWord::kReadBarrierStateMask)); + static_assert( + LockWord::kReadBarrierStateMask == ReadBarrier::rb_ptr_mask_, + "art::LockWord::kReadBarrierStateMask is not equal to art::ReadBarrier::rb_ptr_mask_."); + + // Introduce a dependency on the high bits of rb_state, which shall + // be all zeroes, to prevent load-load reordering, and without using + // a memory barrier (which would be more expensive). 
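In C-like pseudo-code, the two instructions emitted next amount to the following (an illustrative rendition of the data-dependency trick):

  uint32_t zero = rb_state & ~LockWord::kReadBarrierStateMask;  // always 0
  obj = obj + zero;  // address unchanged, but the upcoming reference load
                     // is now data-dependent on the monitor load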
+ // IP = rb_state & ~LockWord::kReadBarrierStateMask = 0 + __ bic(IP, temp_reg, ShifterOperand(LockWord::kReadBarrierStateMask)); + // obj is unchanged by this operation, but its value now depends on + // IP, which depends on temp_reg. + __ add(obj, obj, ShifterOperand(IP)); + + // The actual reference load. + if (index.IsValid()) { + static_assert( + sizeof(mirror::HeapReference<mirror::Object>) == sizeof(int32_t), + "art::mirror::HeapReference<art::mirror::Object> and int32_t have different sizes."); + // /* HeapReference<Object> */ ref = + // *(obj + offset + index * sizeof(HeapReference<Object>)) + if (index.IsConstant()) { + size_t computed_offset = + (index.GetConstant()->AsIntConstant()->GetValue() << TIMES_4) + offset; + __ LoadFromOffset(kLoadWord, ref_reg, obj, computed_offset); + } else { + __ add(IP, obj, ShifterOperand(index.AsRegister<Register>(), LSL, TIMES_4)); + __ LoadFromOffset(kLoadWord, ref_reg, IP, offset); + } + } else { + // /* HeapReference<Object> */ ref = *(obj + offset) + __ LoadFromOffset(kLoadWord, ref_reg, obj, offset); + } + + // Object* ref = ref_addr->AsMirrorPtr() + __ MaybeUnpoisonHeapReference(ref_reg); + + // Slow path used to mark the object `ref` when it is gray. + SlowPathCode* slow_path = + new (GetGraph()->GetArena()) ReadBarrierMarkSlowPathARM(instruction, ref, ref); + AddSlowPath(slow_path); + + // if (rb_state == ReadBarrier::gray_ptr_) + // ref = ReadBarrier::Mark(ref); + __ cmp(temp_reg, ShifterOperand(ReadBarrier::gray_ptr_)); + __ b(slow_path->GetEntryLabel(), EQ); + __ Bind(slow_path->GetExitLabel()); +} + +void CodeGeneratorARM::GenerateReadBarrierSlow(HInstruction* instruction, + Location out, + Location ref, + Location obj, + uint32_t offset, + Location index) { + DCHECK(kEmitCompilerReadBarrier); + + // Insert a slow path based read barrier *after* the reference load. + // // If heap poisoning is enabled, the unpoisoning of the loaded // reference will be carried out by the runtime within the slow // path. @@ -5921,57 +6191,41 @@ void CodeGeneratorARM::GenerateReadBarrier(HInstruction* instruction, ReadBarrierForHeapReferenceSlowPathARM(instruction, out, ref, obj, offset, index); AddSlowPath(slow_path); - // TODO: When read barrier has a fast path, add it here. - /* Currently the read barrier call is inserted after the original load. - * However, if we have a fast path, we need to perform the load of obj.LockWord *before* the - * original load. This load-load ordering is required by the read barrier. - * The fast path/slow path (for Baker's algorithm) should look like: - * - * bool isGray = obj.LockWord & kReadBarrierMask; - * lfence; // load fence or artificial data dependence to prevent load-load reordering - * ref = obj.field; // this is the original load - * if (isGray) { - * ref = Mark(ref); // ideally the slow path just does Mark(ref) - * } - */ - __ b(slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); } -void CodeGeneratorARM::MaybeGenerateReadBarrier(HInstruction* instruction, - Location out, - Location ref, - Location obj, - uint32_t offset, - Location index) { +void CodeGeneratorARM::MaybeGenerateReadBarrierSlow(HInstruction* instruction, + Location out, + Location ref, + Location obj, + uint32_t offset, + Location index) { if (kEmitCompilerReadBarrier) { + // Baker's read barriers shall be handled by the fast path + // (CodeGeneratorARM::GenerateReferenceLoadWithBakerReadBarrier). 
+ DCHECK(!kUseBakerReadBarrier); // If heap poisoning is enabled, unpoisoning will be taken care of // by the runtime within the slow path. - GenerateReadBarrier(instruction, out, ref, obj, offset, index); + GenerateReadBarrierSlow(instruction, out, ref, obj, offset, index); } else if (kPoisonHeapReferences) { __ UnpoisonHeapReference(out.AsRegister<Register>()); } } -void CodeGeneratorARM::GenerateReadBarrierForRoot(HInstruction* instruction, - Location out, - Location root) { +void CodeGeneratorARM::GenerateReadBarrierForRootSlow(HInstruction* instruction, + Location out, + Location root) { DCHECK(kEmitCompilerReadBarrier); + // Insert a slow path based read barrier *after* the GC root load. + // // Note that GC roots are not affected by heap poisoning, so we do // not need to do anything special for this here. SlowPathCode* slow_path = new (GetGraph()->GetArena()) ReadBarrierForRootSlowPathARM(instruction, out, root); AddSlowPath(slow_path); - // TODO: Implement a fast path for ReadBarrierForRoot, performing - // the following operation (for Baker's algorithm): - // - // if (thread.tls32_.is_gc_marking) { - // root = Mark(root); - // } - __ b(slow_path->GetEntryLabel()); __ Bind(slow_path->GetExitLabel()); } @@ -6355,7 +6609,7 @@ void InstructionCodeGeneratorARM::VisitArmDexCacheArraysBase(HArmDexCacheArraysB void CodeGeneratorARM::MoveFromReturnRegister(Location trg, Primitive::Type type) { if (!trg.IsValid()) { - DCHECK(type == Primitive::kPrimVoid); + DCHECK_EQ(type, Primitive::kPrimVoid); return; } diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h index b7c58e1248..f9c49a5f91 100644 --- a/compiler/optimizing/code_generator_arm.h +++ b/compiler/optimizing/code_generator_arm.h @@ -222,17 +222,51 @@ class InstructionCodeGeneratorARM : public HGraphVisitor { void HandleLongRotate(LocationSummary* locations); void HandleRotate(HRor* ror); void HandleShift(HBinaryOperation* operation); - void GenerateMemoryBarrier(MemBarrierKind kind); + void GenerateWideAtomicStore(Register addr, uint32_t offset, Register value_lo, Register value_hi, Register temp1, Register temp2, HInstruction* instruction); void GenerateWideAtomicLoad(Register addr, uint32_t offset, Register out_lo, Register out_hi); + void HandleFieldSet(HInstruction* instruction, const FieldInfo& field_info, bool value_can_be_null); void HandleFieldGet(HInstruction* instruction, const FieldInfo& field_info); + + // Generate a heap reference load using one register `out`: + // + // out <- *(out + offset) + // + // while honoring heap poisoning and/or read barriers (if any). + // Register `temp` is used when generating a read barrier. + void GenerateReferenceLoadOneRegister(HInstruction* instruction, + Location out, + uint32_t offset, + Location temp); + // Generate a heap reference load using two different registers + // `out` and `obj`: + // + // out <- *(obj + offset) + // + // while honoring heap poisoning and/or read barriers (if any). + // Register `temp` is used when generating a Baker's read barrier. + void GenerateReferenceLoadTwoRegisters(HInstruction* instruction, + Location out, + Location obj, + uint32_t offset, + Location temp); + // Generate a GC root reference load: + // + // root <- *(obj + offset) + // + // while honoring read barriers (if any). 
+ void GenerateGcRootFieldLoad(HInstruction* instruction, + Location root, + Register obj, + uint32_t offset); + void GenerateImplicitNullCheck(HNullCheck* instruction); void GenerateExplicitNullCheck(HNullCheck* instruction); void GenerateTestAndBranch(HInstruction* instruction, @@ -346,6 +380,8 @@ class CodeGeneratorARM : public CodeGenerator { // Emit a write barrier. void MarkGCCard(Register temp, Register card, Register object, Register value, bool can_be_null); + void GenerateMemoryBarrier(MemBarrierKind kind); + Label* GetLabelOf(HBasicBlock* block) const { return CommonGetLabelOf<Label>(block_labels_, block); } @@ -406,7 +442,26 @@ class CodeGeneratorARM : public CodeGenerator { return &it->second; } - // Generate a read barrier for a heap reference within `instruction`. + // Fast path implementation of ReadBarrier::Barrier for a heap + // reference field load when Baker's read barriers are used. + void GenerateFieldLoadWithBakerReadBarrier(HInstruction* instruction, + Location out, + Register obj, + uint32_t offset, + Location temp, + bool needs_null_check); + // Fast path implementation of ReadBarrier::Barrier for a heap + // reference array load when Baker's read barriers are used. + void GenerateArrayLoadWithBakerReadBarrier(HInstruction* instruction, + Location out, + Register obj, + uint32_t data_offset, + Location index, + Location temp, + bool needs_null_check); + + // Generate a read barrier for a heap reference within `instruction` + // using a slow path. // // A read barrier for an object reference read from the heap is // implemented as a call to the artReadBarrierSlow runtime entry @@ -423,23 +478,25 @@ class CodeGeneratorARM : public CodeGenerator { // When `index` is provided (i.e. for array accesses), the offset // value passed to artReadBarrierSlow is adjusted to take `index` // into account. - void GenerateReadBarrier(HInstruction* instruction, - Location out, - Location ref, - Location obj, - uint32_t offset, - Location index = Location::NoLocation()); - - // If read barriers are enabled, generate a read barrier for a heap reference. - // If heap poisoning is enabled, also unpoison the reference in `out`. - void MaybeGenerateReadBarrier(HInstruction* instruction, - Location out, - Location ref, - Location obj, - uint32_t offset, - Location index = Location::NoLocation()); - - // Generate a read barrier for a GC root within `instruction`. + void GenerateReadBarrierSlow(HInstruction* instruction, + Location out, + Location ref, + Location obj, + uint32_t offset, + Location index = Location::NoLocation()); + + // If read barriers are enabled, generate a read barrier for a heap + // reference using a slow path. If heap poisoning is enabled, also + // unpoison the reference in `out`. + void MaybeGenerateReadBarrierSlow(HInstruction* instruction, + Location out, + Location ref, + Location obj, + uint32_t offset, + Location index = Location::NoLocation()); + + // Generate a read barrier for a GC root within `instruction` using + // a slow path. // // A read barrier for an object reference GC root is implemented as // a call to the artReadBarrierForRootSlow runtime entry point, @@ -449,9 +506,19 @@ class CodeGeneratorARM : public CodeGenerator { // // The `out` location contains the value returned by // artReadBarrierForRootSlow. 
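For reference, the runtime entry points invoked by these slow paths are declared roughly as follows (signatures recalled from the runtime side, so treat them as approximate):

  extern "C" mirror::Object* artReadBarrierSlow(mirror::Object* ref,
                                                mirror::Object* obj,
                                                uint32_t offset);
  extern "C" mirror::Object* artReadBarrierForRootSlow(GcRoot<mirror::Object>* root);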
- void GenerateReadBarrierForRoot(HInstruction* instruction, Location out, Location root); + void GenerateReadBarrierForRootSlow(HInstruction* instruction, Location out, Location root); private: + // Factored implementation of GenerateFieldLoadWithBakerReadBarrier + // and GenerateArrayLoadWithBakerReadBarrier. + void GenerateReferenceLoadWithBakerReadBarrier(HInstruction* instruction, + Location ref, + Register obj, + uint32_t offset, + Location index, + Location temp, + bool needs_null_check); + Register GetInvokeStaticOrDirectExtraParameter(HInvokeStaticOrDirect* invoke, Register temp); using MethodToLiteralMap = ArenaSafeMap<MethodReference, Literal*, MethodReferenceComparator>; diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index e306432773..75bf72924b 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -1191,17 +1191,16 @@ void CodeGeneratorMIPS::InvokeRuntime(int32_t entry_point_offset, uint32_t dex_pc, SlowPathCode* slow_path, bool is_direct_entrypoint) { + __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset); + __ Jalr(T9); if (is_direct_entrypoint) { // Reserve argument space on stack (for $a0-$a3) for // entrypoints that directly reference native implementations. // Called function may use this space to store $a0-$a3 regs. - __ IncreaseFrameSize(kMipsDirectEntrypointRuntimeOffset); - } - __ LoadFromOffset(kLoadWord, T9, TR, entry_point_offset); - __ Jalr(T9); - __ Nop(); - if (is_direct_entrypoint) { + __ IncreaseFrameSize(kMipsDirectEntrypointRuntimeOffset); // Single instruction in delay slot. __ DecreaseFrameSize(kMipsDirectEntrypointRuntimeOffset); + } else { + __ Nop(); // In delay slot. } RecordPcInfo(instruction, dex_pc, slow_path); } @@ -1275,15 +1274,9 @@ void LocationsBuilderMIPS::HandleBinaryOp(HBinaryOperation* instruction) { } case Primitive::kPrimLong: { - // TODO: can 2nd param be const? locations->SetInAt(0, Location::RequiresRegister()); - locations->SetInAt(1, Location::RequiresRegister()); - if (instruction->IsAdd() || instruction->IsSub()) { - locations->SetOut(Location::RequiresRegister(), Location::kOutputOverlap); - } else { - DCHECK(instruction->IsAnd() || instruction->IsOr() || instruction->IsXor()); - locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); - } + locations->SetInAt(1, Location::RegisterOrConstant(instruction->InputAt(1))); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); break; } @@ -1350,34 +1343,142 @@ void InstructionCodeGeneratorMIPS::HandleBinaryOp(HBinaryOperation* instruction) } case Primitive::kPrimLong: { - // TODO: can 2nd param be const? 
Register dst_high = locations->Out().AsRegisterPairHigh<Register>(); Register dst_low = locations->Out().AsRegisterPairLow<Register>(); Register lhs_high = locations->InAt(0).AsRegisterPairHigh<Register>(); Register lhs_low = locations->InAt(0).AsRegisterPairLow<Register>(); - Register rhs_high = locations->InAt(1).AsRegisterPairHigh<Register>(); - Register rhs_low = locations->InAt(1).AsRegisterPairLow<Register>(); - - if (instruction->IsAnd()) { - __ And(dst_low, lhs_low, rhs_low); - __ And(dst_high, lhs_high, rhs_high); - } else if (instruction->IsOr()) { - __ Or(dst_low, lhs_low, rhs_low); - __ Or(dst_high, lhs_high, rhs_high); - } else if (instruction->IsXor()) { - __ Xor(dst_low, lhs_low, rhs_low); - __ Xor(dst_high, lhs_high, rhs_high); - } else if (instruction->IsAdd()) { - __ Addu(dst_low, lhs_low, rhs_low); - __ Sltu(TMP, dst_low, lhs_low); - __ Addu(dst_high, lhs_high, rhs_high); - __ Addu(dst_high, dst_high, TMP); + Location rhs_location = locations->InAt(1); + bool use_imm = rhs_location.IsConstant(); + if (!use_imm) { + Register rhs_high = rhs_location.AsRegisterPairHigh<Register>(); + Register rhs_low = rhs_location.AsRegisterPairLow<Register>(); + if (instruction->IsAnd()) { + __ And(dst_low, lhs_low, rhs_low); + __ And(dst_high, lhs_high, rhs_high); + } else if (instruction->IsOr()) { + __ Or(dst_low, lhs_low, rhs_low); + __ Or(dst_high, lhs_high, rhs_high); + } else if (instruction->IsXor()) { + __ Xor(dst_low, lhs_low, rhs_low); + __ Xor(dst_high, lhs_high, rhs_high); + } else if (instruction->IsAdd()) { + if (lhs_low == rhs_low) { + // Special case for lhs = rhs and the sum potentially overwriting both lhs and rhs. + __ Slt(TMP, lhs_low, ZERO); + __ Addu(dst_low, lhs_low, rhs_low); + } else { + __ Addu(dst_low, lhs_low, rhs_low); + // If the sum overwrites rhs, lhs remains unchanged, otherwise rhs remains unchanged. + __ Sltu(TMP, dst_low, (dst_low == rhs_low) ? 
lhs_low : rhs_low); + } + __ Addu(dst_high, lhs_high, rhs_high); + __ Addu(dst_high, dst_high, TMP); + } else { + DCHECK(instruction->IsSub()); + __ Sltu(TMP, lhs_low, rhs_low); + __ Subu(dst_low, lhs_low, rhs_low); + __ Subu(dst_high, lhs_high, rhs_high); + __ Subu(dst_high, dst_high, TMP); + } } else { - DCHECK(instruction->IsSub()); - __ Subu(dst_low, lhs_low, rhs_low); - __ Sltu(TMP, lhs_low, dst_low); - __ Subu(dst_high, lhs_high, rhs_high); - __ Subu(dst_high, dst_high, TMP); + int64_t value = CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()->AsConstant()); + if (instruction->IsOr()) { + uint32_t low = Low32Bits(value); + uint32_t high = High32Bits(value); + if (IsUint<16>(low)) { + if (dst_low != lhs_low || low != 0) { + __ Ori(dst_low, lhs_low, low); + } + } else { + __ LoadConst32(TMP, low); + __ Or(dst_low, lhs_low, TMP); + } + if (IsUint<16>(high)) { + if (dst_high != lhs_high || high != 0) { + __ Ori(dst_high, lhs_high, high); + } + } else { + if (high != low) { + __ LoadConst32(TMP, high); + } + __ Or(dst_high, lhs_high, TMP); + } + } else if (instruction->IsXor()) { + uint32_t low = Low32Bits(value); + uint32_t high = High32Bits(value); + if (IsUint<16>(low)) { + if (dst_low != lhs_low || low != 0) { + __ Xori(dst_low, lhs_low, low); + } + } else { + __ LoadConst32(TMP, low); + __ Xor(dst_low, lhs_low, TMP); + } + if (IsUint<16>(high)) { + if (dst_high != lhs_high || high != 0) { + __ Xori(dst_high, lhs_high, high); + } + } else { + if (high != low) { + __ LoadConst32(TMP, high); + } + __ Xor(dst_high, lhs_high, TMP); + } + } else if (instruction->IsAnd()) { + uint32_t low = Low32Bits(value); + uint32_t high = High32Bits(value); + if (IsUint<16>(low)) { + __ Andi(dst_low, lhs_low, low); + } else if (low != 0xFFFFFFFF) { + __ LoadConst32(TMP, low); + __ And(dst_low, lhs_low, TMP); + } else if (dst_low != lhs_low) { + __ Move(dst_low, lhs_low); + } + if (IsUint<16>(high)) { + __ Andi(dst_high, lhs_high, high); + } else if (high != 0xFFFFFFFF) { + if (high != low) { + __ LoadConst32(TMP, high); + } + __ And(dst_high, lhs_high, TMP); + } else if (dst_high != lhs_high) { + __ Move(dst_high, lhs_high); + } + } else { + if (instruction->IsSub()) { + value = -value; + } else { + DCHECK(instruction->IsAdd()); + } + int32_t low = Low32Bits(value); + int32_t high = High32Bits(value); + if (IsInt<16>(low)) { + if (dst_low != lhs_low || low != 0) { + __ Addiu(dst_low, lhs_low, low); + } + if (low != 0) { + __ Sltiu(AT, dst_low, low); + } + } else { + __ LoadConst32(TMP, low); + __ Addu(dst_low, lhs_low, TMP); + __ Sltu(AT, dst_low, TMP); + } + if (IsInt<16>(high)) { + if (dst_high != lhs_high || high != 0) { + __ Addiu(dst_high, lhs_high, high); + } + } else { + if (high != low) { + __ LoadConst32(TMP, high); + } + __ Addu(dst_high, lhs_high, TMP); + } + if (low != 0) { + __ Addu(dst_high, dst_high, AT); + } + } } break; } @@ -1416,12 +1517,15 @@ void LocationsBuilderMIPS::HandleShift(HBinaryOperation* instr) { Primitive::Type type = instr->GetResultType(); switch (type) { case Primitive::kPrimInt: - case Primitive::kPrimLong: { + locations->SetInAt(0, Location::RequiresRegister()); + locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1))); + locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + break; + case Primitive::kPrimLong: locations->SetInAt(0, Location::RequiresRegister()); locations->SetInAt(1, Location::RegisterOrConstant(instr->InputAt(1))); locations->SetOut(Location::RequiresRegister()); break; - } default: LOG(FATAL) << 
"Unexpected shift type " << type; } @@ -1440,6 +1544,8 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) { int64_t rhs_imm = use_imm ? CodeGenerator::GetInt64ValueOf(rhs_location.GetConstant()) : 0; uint32_t shift_mask = (type == Primitive::kPrimInt) ? kMaxIntShiftValue : kMaxLongShiftValue; uint32_t shift_value = rhs_imm & shift_mask; + // Is the INS (Insert Bit Field) instruction supported? + bool has_ins = codegen_->GetInstructionSetFeatures().IsMipsIsaRevGreaterThanEqual2(); switch (type) { case Primitive::kPrimInt: { @@ -1474,21 +1580,37 @@ void InstructionCodeGeneratorMIPS::HandleShift(HBinaryOperation* instr) { if (shift_value == 0) { codegen_->Move64(locations->Out(), locations->InAt(0)); } else if (shift_value < kMipsBitsPerWord) { - if (instr->IsShl()) { - __ Sll(dst_low, lhs_low, shift_value); - __ Srl(TMP, lhs_low, kMipsBitsPerWord - shift_value); - __ Sll(dst_high, lhs_high, shift_value); - __ Or(dst_high, dst_high, TMP); - } else if (instr->IsShr()) { - __ Sra(dst_high, lhs_high, shift_value); - __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value); - __ Srl(dst_low, lhs_low, shift_value); - __ Or(dst_low, dst_low, TMP); + if (has_ins) { + if (instr->IsShl()) { + __ Srl(dst_high, lhs_low, kMipsBitsPerWord - shift_value); + __ Ins(dst_high, lhs_high, shift_value, kMipsBitsPerWord - shift_value); + __ Sll(dst_low, lhs_low, shift_value); + } else if (instr->IsShr()) { + __ Srl(dst_low, lhs_low, shift_value); + __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value); + __ Sra(dst_high, lhs_high, shift_value); + } else { + __ Srl(dst_low, lhs_low, shift_value); + __ Ins(dst_low, lhs_high, kMipsBitsPerWord - shift_value, shift_value); + __ Srl(dst_high, lhs_high, shift_value); + } } else { - __ Srl(dst_high, lhs_high, shift_value); - __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value); - __ Srl(dst_low, lhs_low, shift_value); - __ Or(dst_low, dst_low, TMP); + if (instr->IsShl()) { + __ Sll(dst_low, lhs_low, shift_value); + __ Srl(TMP, lhs_low, kMipsBitsPerWord - shift_value); + __ Sll(dst_high, lhs_high, shift_value); + __ Or(dst_high, dst_high, TMP); + } else if (instr->IsShr()) { + __ Sra(dst_high, lhs_high, shift_value); + __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value); + __ Srl(dst_low, lhs_low, shift_value); + __ Or(dst_low, dst_low, TMP); + } else { + __ Srl(dst_high, lhs_high, shift_value); + __ Sll(TMP, lhs_high, kMipsBitsPerWord - shift_value); + __ Srl(dst_low, lhs_low, shift_value); + __ Or(dst_low, dst_low, TMP); + } } } else { shift_value -= kMipsBitsPerWord; diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index e236f0efc8..46140ecae1 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -1335,9 +1335,10 @@ void LocationsBuilderX86::VisitExit(HExit* exit) { void InstructionCodeGeneratorX86::VisitExit(HExit* exit ATTRIBUTE_UNUSED) { } +template<class LabelType> void InstructionCodeGeneratorX86::GenerateFPJumps(HCondition* cond, - Label* true_label, - Label* false_label) { + LabelType* true_label, + LabelType* false_label) { if (cond->IsFPConditionTrueIfNaN()) { __ j(kUnordered, true_label); } else if (cond->IsFPConditionFalseIfNaN()) { @@ -1346,9 +1347,10 @@ void InstructionCodeGeneratorX86::GenerateFPJumps(HCondition* cond, __ j(X86UnsignedOrFPCondition(cond->GetCondition()), true_label); } +template<class LabelType> void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond, - Label* true_label, - 
Label* false_label) { + LabelType* true_label, + LabelType* false_label) { LocationSummary* locations = cond->GetLocations(); Location left = locations->InAt(0); Location right = locations->InAt(1); @@ -1437,14 +1439,15 @@ void InstructionCodeGeneratorX86::GenerateLongComparesAndJumps(HCondition* cond, __ j(final_condition, true_label); } +template<class LabelType> void InstructionCodeGeneratorX86::GenerateCompareTestAndBranch(HCondition* condition, - Label* true_target_in, - Label* false_target_in) { + LabelType* true_target_in, + LabelType* false_target_in) { // Generated branching requires both targets to be explicit. If either of the // targets is nullptr (fallthrough) use and bind `fallthrough_target` instead. - Label fallthrough_target; - Label* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in; - Label* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in; + LabelType fallthrough_target; + LabelType* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in; + LabelType* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in; LocationSummary* locations = condition->GetLocations(); Location left = locations->InAt(0); @@ -1486,10 +1489,11 @@ static bool AreEflagsSetFrom(HInstruction* cond, HInstruction* branch) { !Primitive::IsFloatingPointType(cond->InputAt(0)->GetType()); } +template<class LabelType> void InstructionCodeGeneratorX86::GenerateTestAndBranch(HInstruction* instruction, size_t condition_input_index, - Label* true_target, - Label* false_target) { + LabelType* true_target, + LabelType* false_target) { HInstruction* cond = instruction->InputAt(condition_input_index); if (true_target == nullptr && false_target == nullptr) { @@ -1613,7 +1617,7 @@ void InstructionCodeGeneratorX86::VisitDeoptimize(HDeoptimize* deoptimize) { GenerateTestAndBranch(deoptimize, /* condition_input_index */ 0, slow_path->GetEntryLabel(), - /* false_target */ nullptr); + /* false_target */ static_cast<Label*>(nullptr)); } void LocationsBuilderX86::VisitNativeDebugInfo(HNativeDebugInfo* info) { @@ -1709,7 +1713,7 @@ void InstructionCodeGeneratorX86::HandleCondition(HCondition* cond) { Location lhs = locations->InAt(0); Location rhs = locations->InAt(1); Register reg = locations->Out().AsRegister<Register>(); - Label true_label, false_label; + NearLabel true_label, false_label; switch (cond->InputAt(0)->GetType()) { default: { diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h index 3d343177d0..df7347658b 100644 --- a/compiler/optimizing/code_generator_x86.h +++ b/compiler/optimizing/code_generator_x86.h @@ -267,15 +267,22 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor { void GenerateImplicitNullCheck(HNullCheck* instruction); void GenerateExplicitNullCheck(HNullCheck* instruction); + template<class LabelType> void GenerateTestAndBranch(HInstruction* instruction, size_t condition_input_index, - Label* true_target, - Label* false_target); + LabelType* true_target, + LabelType* false_target); + template<class LabelType> void GenerateCompareTestAndBranch(HCondition* condition, - Label* true_target, - Label* false_target); - void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label); - void GenerateLongComparesAndJumps(HCondition* cond, Label* true_label, Label* false_label); + LabelType* true_target, + LabelType* false_target); + template<class LabelType> + void GenerateFPJumps(HCondition* cond, LabelType* true_label, 
LabelType* false_label); + template<class LabelType> + void GenerateLongComparesAndJumps(HCondition* cond, + LabelType* true_label, + LabelType* false_label); + void HandleGoto(HInstruction* got, HBasicBlock* successor); void GenPackedSwitchWithCompares(Register value_reg, int32_t lower_bound, diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 8b77ec43c3..ae5c050bd9 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -1370,9 +1370,10 @@ void LocationsBuilderX86_64::VisitExit(HExit* exit) { void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit ATTRIBUTE_UNUSED) { } +template<class LabelType> void InstructionCodeGeneratorX86_64::GenerateFPJumps(HCondition* cond, - Label* true_label, - Label* false_label) { + LabelType* true_label, + LabelType* false_label) { if (cond->IsFPConditionTrueIfNaN()) { __ j(kUnordered, true_label); } else if (cond->IsFPConditionFalseIfNaN()) { @@ -1381,14 +1382,15 @@ void InstructionCodeGeneratorX86_64::GenerateFPJumps(HCondition* cond, __ j(X86_64FPCondition(cond->GetCondition()), true_label); } +template<class LabelType> void InstructionCodeGeneratorX86_64::GenerateCompareTestAndBranch(HCondition* condition, - Label* true_target_in, - Label* false_target_in) { + LabelType* true_target_in, + LabelType* false_target_in) { // Generated branching requires both targets to be explicit. If either of the // targets is nullptr (fallthrough) use and bind `fallthrough_target` instead. - Label fallthrough_target; - Label* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in; - Label* false_target = false_target_in == nullptr ? &fallthrough_target : false_target_in; + LabelType fallthrough_target; + LabelType* true_target = true_target_in == nullptr ? &fallthrough_target : true_target_in; + LabelType* false_target = false_target_in == nullptr ? 
&fallthrough_target : false_target_in; LocationSummary* locations = condition->GetLocations(); Location left = locations->InAt(0); @@ -1470,10 +1472,11 @@ static bool AreEflagsSetFrom(HInstruction* cond, HInstruction* branch) { !Primitive::IsFloatingPointType(cond->InputAt(0)->GetType()); } +template<class LabelType> void InstructionCodeGeneratorX86_64::GenerateTestAndBranch(HInstruction* instruction, size_t condition_input_index, - Label* true_target, - Label* false_target) { + LabelType* true_target, + LabelType* false_target) { HInstruction* cond = instruction->InputAt(condition_input_index); if (true_target == nullptr && false_target == nullptr) { @@ -1597,7 +1600,7 @@ void InstructionCodeGeneratorX86_64::VisitDeoptimize(HDeoptimize* deoptimize) { GenerateTestAndBranch(deoptimize, /* condition_input_index */ 0, slow_path->GetEntryLabel(), - /* false_target */ nullptr); + /* false_target */ static_cast<Label*>(nullptr)); } void LocationsBuilderX86_64::VisitNativeDebugInfo(HNativeDebugInfo* info) { @@ -1684,7 +1687,7 @@ void InstructionCodeGeneratorX86_64::HandleCondition(HCondition* cond) { Location lhs = locations->InAt(0); Location rhs = locations->InAt(1); CpuRegister reg = locations->Out().AsRegister<CpuRegister>(); - Label true_label, false_label; + NearLabel true_label, false_label; switch (cond->InputAt(0)->GetType()) { default: @@ -5747,7 +5750,7 @@ void InstructionCodeGeneratorX86_64::VisitCheckCast(HCheckCast* instruction) { is_type_check_slow_path_fatal); codegen_->AddSlowPath(type_check_slow_path); - Label done; + NearLabel done; // Avoid null check if we know obj is not null. if (instruction->MustDoNullCheck()) { __ testl(obj, obj); diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h index 9995416138..c5e8a04da6 100644 --- a/compiler/optimizing/code_generator_x86_64.h +++ b/compiler/optimizing/code_generator_x86_64.h @@ -258,14 +258,18 @@ class InstructionCodeGeneratorX86_64 : public HGraphVisitor { void GenerateExplicitNullCheck(HNullCheck* instruction); void PushOntoFPStack(Location source, uint32_t temp_offset, uint32_t stack_adjustment, bool is_float); + template<class LabelType> void GenerateTestAndBranch(HInstruction* instruction, size_t condition_input_index, - Label* true_target, - Label* false_target); + LabelType* true_target, + LabelType* false_target); + template<class LabelType> void GenerateCompareTestAndBranch(HCondition* condition, - Label* true_target, - Label* false_target); - void GenerateFPJumps(HCondition* cond, Label* true_label, Label* false_label); + LabelType* true_target, + LabelType* false_target); + template<class LabelType> + void GenerateFPJumps(HCondition* cond, LabelType* true_label, LabelType* false_label); + void HandleGoto(HInstruction* got, HBasicBlock* successor); X86_64Assembler* const assembler_; diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc index f3c1dbe3f5..6d0bdbe19b 100644 --- a/compiler/optimizing/graph_checker.cc +++ b/compiler/optimizing/graph_checker.cc @@ -763,6 +763,14 @@ void SSAChecker::VisitPhi(HPhi* phi) { phi->GetId(), phi->GetRegNumber(), type_str.str().c_str())); + } else if (phi->GetType() == Primitive::kPrimNot) { + std::stringstream type_str; + type_str << other_phi->GetType(); + AddError(StringPrintf( + "Equivalent non-reference phi (%d) found for VReg %d with type: %s.", + phi->GetId(), + phi->GetRegNumber(), + type_str.str().c_str())); } else { ArenaBitVector visited(GetGraph()->GetArena(), 0, /* expandable */ true); if 
(!IsConstantEquivalent(phi, other_phi, &visited)) { @@ -913,4 +921,16 @@ void SSAChecker::VisitConstant(HConstant* instruction) { } } +void SSAChecker::VisitBoundType(HBoundType* instruction) { + VisitInstruction(instruction); + + ScopedObjectAccess soa(Thread::Current()); + if (!instruction->GetUpperBound().IsValid()) { + AddError(StringPrintf( + "%s %d does not have a valid upper bound RTI.", + instruction->DebugName(), + instruction->GetId())); + } +} + } // namespace art diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h index d5ddbabc8c..2e16bfe245 100644 --- a/compiler/optimizing/graph_checker.h +++ b/compiler/optimizing/graph_checker.h @@ -128,6 +128,7 @@ class SSAChecker : public GraphChecker { void VisitPackedSwitch(HPackedSwitch* instruction) OVERRIDE; void VisitBooleanNot(HBooleanNot* instruction) OVERRIDE; void VisitConstant(HConstant* instruction) OVERRIDE; + void VisitBoundType(HBoundType* instruction) OVERRIDE; void HandleBooleanInput(HInstruction* instruction, size_t input_index); diff --git a/compiler/optimizing/induction_var_analysis_test.cc b/compiler/optimizing/induction_var_analysis_test.cc index 776c115e9d..29a1845658 100644 --- a/compiler/optimizing/induction_var_analysis_test.cc +++ b/compiler/optimizing/induction_var_analysis_test.cc @@ -85,6 +85,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest { constant0_ = graph_->GetIntConstant(0); constant1_ = graph_->GetIntConstant(1); constant100_ = graph_->GetIntConstant(100); + float_constant0_ = graph_->GetFloatConstant(0.0f); induc_ = new (&allocator_) HLocal(n); entry_->AddInstruction(induc_); entry_->AddInstruction(new (&allocator_) HStoreLocal(induc_, constant0_)); @@ -156,8 +157,10 @@ class InductionVarAnalysisTest : public CommonCompilerTest { HInstruction* InsertArrayStore(HLocal* subscript, int d) { HInstruction* load = InsertInstruction( new (&allocator_) HLoadLocal(subscript, Primitive::kPrimInt), d); + // ArraySet is given a float value in order to avoid SsaBuilder typing + // it from the array's non-existent reference type info. return InsertInstruction(new (&allocator_) HArraySet( - parameter_, load, constant0_, Primitive::kPrimInt, 0), d); + parameter_, load, float_constant0_, Primitive::kPrimFloat, 0), d); } // Returns induction information of instruction in loop at depth d. @@ -187,6 +190,7 @@ class InductionVarAnalysisTest : public CommonCompilerTest { HInstruction* constant0_; HInstruction* constant1_; HInstruction* constant100_; + HInstruction* float_constant0_; HLocal* induc_; // "vreg_n", the "k" HLocal* tmp_; // "vreg_n+1" HLocal* dum_; // "vreg_n+2" diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 0e50416a9e..48d32999b7 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -42,7 +42,14 @@ namespace art { -static constexpr size_t kMaximumNumberOfHInstructions = 12; +static constexpr size_t kMaximumNumberOfHInstructions = 32; + +// Limit the number of dex registers that we accumulate while inlining +// to avoid creating large amount of nested environments. +static constexpr size_t kMaximumNumberOfCumulatedDexRegisters = 64; + +// Avoid inlining within a huge method due to memory pressure. 
+static constexpr size_t kMaximumCodeUnitSize = 4096; void HInliner::Run() { const CompilerOptions& compiler_options = compiler_driver_->GetCompilerOptions(); @@ -50,6 +57,9 @@ void HInliner::Run() { || (compiler_options.GetInlineMaxCodeUnits() == 0)) { return; } + if (caller_compilation_unit_.GetCodeItem()->insns_size_in_code_units_ > kMaximumCodeUnitSize) { + return; + } if (graph_->IsDebuggable()) { // For simplicity, we currently never inline when the graph is debuggable. This avoids // doing some logic in the runtime to discover if a method could have been inlined. @@ -216,6 +226,7 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) { ClassLinker* class_linker = caller_compilation_unit_.GetClassLinker(); // We can query the dex cache directly. The verifier has populated it already. ArtMethod* resolved_method; + ArtMethod* actual_method = nullptr; if (invoke_instruction->IsInvokeStaticOrDirect()) { if (invoke_instruction->AsInvokeStaticOrDirect()->IsStringInit()) { VLOG(compiler) << "Not inlining a String.<init> method"; @@ -227,9 +238,15 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) { : class_linker->FindDexCache(soa.Self(), *ref.dex_file); resolved_method = dex_cache->GetResolvedMethod( ref.dex_method_index, class_linker->GetImagePointerSize()); + // actual_method == resolved_method for direct or static calls. + actual_method = resolved_method; } else { resolved_method = caller_compilation_unit_.GetDexCache().Get()->GetResolvedMethod( method_index, class_linker->GetImagePointerSize()); + if (resolved_method != nullptr) { + // Check if we can statically find the method. + actual_method = FindVirtualOrInterfaceTarget(invoke_instruction, resolved_method); + } } if (resolved_method == nullptr) { @@ -239,15 +256,10 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) { return false; } - if (invoke_instruction->IsInvokeStaticOrDirect()) { - return TryInline(invoke_instruction, resolved_method); - } - - // Check if we can statically find the method. - ArtMethod* actual_method = FindVirtualOrInterfaceTarget(invoke_instruction, resolved_method); if (actual_method != nullptr) { return TryInline(invoke_instruction, actual_method); } + DCHECK(!invoke_instruction->IsInvokeStaticOrDirect()); // Check if we can use an inline cache. ArtMethod* caller = graph_->GetArtMethod(); @@ -589,6 +601,7 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method, compiler_driver_, handles_, stats_, + total_number_of_dex_registers_ + code_item->registers_size_, depth_ + 1); inliner.Run(); number_of_instructions_budget += inliner.number_of_inlined_instructions_; @@ -620,6 +633,10 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method, HReversePostOrderIterator it(*callee_graph); it.Advance(); // Past the entry block, it does not contain instructions that prevent inlining. 
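The inlining limits introduced in this hunk compose into a layered budget: a huge caller is skipped outright, the instruction count is bounded, and instructions needing an environment are refused once too many dex registers have accumulated across nesting levels. A minimal sketch of how the three gates interact; the constants mirror the diff, but InliningContext and MayInline are illustrative simplifications, not the ART API:

#include <cstddef>

static constexpr size_t kMaximumNumberOfHInstructions = 32;
static constexpr size_t kMaximumNumberOfCumulatedDexRegisters = 64;
static constexpr size_t kMaximumCodeUnitSize = 4096;

// Hypothetical bookkeeping carried down the inlining recursion.
struct InliningContext {
  size_t caller_code_units;       // Caller's insns_size_in_code_units_.
  size_t total_dex_registers;     // Accumulated across inlining depth.
  size_t instructions_inlined;    // Running count against the budget.
};

// Returns whether the next callee instruction may still be inlined.
bool MayInline(const InliningContext& ctx, bool needs_environment) {
  if (ctx.caller_code_units > kMaximumCodeUnitSize) return false;
  if (ctx.instructions_inlined >= kMaximumNumberOfHInstructions) return false;
  if (needs_environment &&
      ctx.total_dex_registers >= kMaximumNumberOfCumulatedDexRegisters) return false;
  return true;
}

One detail the fixed-threshold sketch omits: in the diff, TryBuildAndInline grows number_of_instructions_budget by whatever a nested HInliner managed to inline, so the budget is shared rather than constant.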
size_t number_of_instructions = 0; + + bool can_inline_environment = + total_number_of_dex_registers_ < kMaximumNumberOfCumulatedDexRegisters; + for (; !it.Done(); it.Advance()) { HBasicBlock* block = it.Current(); if (block->IsLoopHeader()) { @@ -633,10 +650,17 @@ bool HInliner::TryBuildAndInline(ArtMethod* resolved_method, instr_it.Advance()) { if (number_of_instructions++ == number_of_instructions_budget) { VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file) - << " could not be inlined because it is too big."; + << " is not inlined because its caller has reached" + << " its instruction budget limit."; return false; } HInstruction* current = instr_it.Current(); + if (!can_inline_environment && current->NeedsEnvironment()) { + VLOG(compiler) << "Method " << PrettyMethod(method_index, callee_dex_file) + << " is not inlined because its caller has reached" + << " its environment budget limit."; + return false; + } if (current->IsInvokeInterface()) { // Disable inlining of interface calls. The cost in case of entering the diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h index 7b9fb73ccf..8de510ea37 100644 --- a/compiler/optimizing/inliner.h +++ b/compiler/optimizing/inliner.h @@ -40,13 +40,15 @@ class HInliner : public HOptimization { CompilerDriver* compiler_driver, StackHandleScopeCollection* handles, OptimizingCompilerStats* stats, - size_t depth = 0) + size_t total_number_of_dex_registers, + size_t depth) : HOptimization(outer_graph, kInlinerPassName, stats), outermost_graph_(outermost_graph), outer_compilation_unit_(outer_compilation_unit), caller_compilation_unit_(caller_compilation_unit), codegen_(codegen), compiler_driver_(compiler_driver), + total_number_of_dex_registers_(total_number_of_dex_registers), depth_(depth), number_of_inlined_instructions_(0), handles_(handles) {} @@ -88,6 +90,7 @@ class HInliner : public HOptimization { const DexCompilationUnit& caller_compilation_unit_; CodeGenerator* const codegen_; CompilerDriver* const compiler_driver_; + const size_t total_number_of_dex_registers_; const size_t depth_; size_t number_of_inlined_instructions_; StackHandleScopeCollection* const handles_; diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc index 4683aee603..1e6b3a1fb3 100644 --- a/compiler/optimizing/intrinsics_arm.cc +++ b/compiler/optimizing/intrinsics_arm.cc @@ -502,9 +502,6 @@ static void GenUnsafeGet(HInvoke* invoke, bool is_volatile, CodeGeneratorARM* codegen) { LocationSummary* locations = invoke->GetLocations(); - DCHECK((type == Primitive::kPrimInt) || - (type == Primitive::kPrimLong) || - (type == Primitive::kPrimNot)); ArmAssembler* assembler = codegen->GetAssembler(); Location base_loc = locations->InAt(1); Register base = base_loc.AsRegister<Register>(); // Object pointer. @@ -512,30 +509,67 @@ static void GenUnsafeGet(HInvoke* invoke, Register offset = offset_loc.AsRegisterPairLow<Register>(); // Long offset, lo part only. 
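Below, the rewritten GenUnsafeGet gives each primitive type its own load sequence and pairs every volatile access with a dmb ISH barrier after the load, which is the standard ARMv7 lowering of a load-acquire. A C++ analogy of the contract being implemented, not the ART code itself:

#include <atomic>
#include <cstdint>

// What Unsafe.getIntVolatile must guarantee: the load is ordered before all
// subsequent memory operations on this thread (acquire semantics). The
// ldr-then-dmb-ISH sequence emitted below is one valid lowering of this.
int32_t GetIntVolatile(const std::atomic<int32_t>* addr) {
  return addr->load(std::memory_order_acquire);
}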
Location trg_loc = locations->Out(); - if (type == Primitive::kPrimLong) { - Register trg_lo = trg_loc.AsRegisterPairLow<Register>(); - __ add(IP, base, ShifterOperand(offset)); - if (is_volatile && !codegen->GetInstructionSetFeatures().HasAtomicLdrdAndStrd()) { - Register trg_hi = trg_loc.AsRegisterPairHigh<Register>(); - __ ldrexd(trg_lo, trg_hi, IP); - } else { - __ ldrd(trg_lo, Address(IP)); + switch (type) { + case Primitive::kPrimInt: { + Register trg = trg_loc.AsRegister<Register>(); + __ ldr(trg, Address(base, offset)); + if (is_volatile) { + __ dmb(ISH); + } + break; } - } else { - Register trg = trg_loc.AsRegister<Register>(); - __ ldr(trg, Address(base, offset)); - } - if (is_volatile) { - __ dmb(ISH); - } + case Primitive::kPrimNot: { + Register trg = trg_loc.AsRegister<Register>(); + if (kEmitCompilerReadBarrier) { + if (kUseBakerReadBarrier) { + Location temp = locations->GetTemp(0); + codegen->GenerateArrayLoadWithBakerReadBarrier( + invoke, trg_loc, base, 0U, offset_loc, temp, /* needs_null_check */ false); + if (is_volatile) { + __ dmb(ISH); + } + } else { + __ ldr(trg, Address(base, offset)); + if (is_volatile) { + __ dmb(ISH); + } + codegen->GenerateReadBarrierSlow(invoke, trg_loc, trg_loc, base_loc, 0U, offset_loc); + } + } else { + __ ldr(trg, Address(base, offset)); + if (is_volatile) { + __ dmb(ISH); + } + __ MaybeUnpoisonHeapReference(trg); + } + break; + } - if (type == Primitive::kPrimNot) { - codegen->MaybeGenerateReadBarrier(invoke, trg_loc, trg_loc, base_loc, 0U, offset_loc); + case Primitive::kPrimLong: { + Register trg_lo = trg_loc.AsRegisterPairLow<Register>(); + __ add(IP, base, ShifterOperand(offset)); + if (is_volatile && !codegen->GetInstructionSetFeatures().HasAtomicLdrdAndStrd()) { + Register trg_hi = trg_loc.AsRegisterPairHigh<Register>(); + __ ldrexd(trg_lo, trg_hi, IP); + } else { + __ ldrd(trg_lo, Address(IP)); + } + if (is_volatile) { + __ dmb(ISH); + } + break; + } + + default: + LOG(FATAL) << "Unexpected type " << type; + UNREACHABLE(); } } -static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke) { +static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, + HInvoke* invoke, + Primitive::Type type) { bool can_call = kEmitCompilerReadBarrier && (invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObject || invoke->GetIntrinsic() == Intrinsics::kUnsafeGetObjectVolatile); @@ -548,25 +582,30 @@ static void CreateIntIntIntToIntLocations(ArenaAllocator* arena, HInvoke* invoke locations->SetInAt(1, Location::RequiresRegister()); locations->SetInAt(2, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap); + if (type == Primitive::kPrimNot && kEmitCompilerReadBarrier && kUseBakerReadBarrier) { + // We need a temporary register for the read barrier marking slow + // path in InstructionCodeGeneratorARM::GenerateArrayLoadWithBakerReadBarrier. 
+ locations->AddTemp(Location::RequiresRegister()); + } } void IntrinsicLocationsBuilderARM::VisitUnsafeGet(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt); } void IntrinsicLocationsBuilderARM::VisitUnsafeGetVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimInt); } void IntrinsicLocationsBuilderARM::VisitUnsafeGetLong(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong); } void IntrinsicLocationsBuilderARM::VisitUnsafeGetLongVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimLong); } void IntrinsicLocationsBuilderARM::VisitUnsafeGetObject(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot); } void IntrinsicLocationsBuilderARM::VisitUnsafeGetObjectVolatile(HInvoke* invoke) { - CreateIntIntIntToIntLocations(arena_, invoke); + CreateIntIntIntToIntLocations(arena_, invoke, Primitive::kPrimNot); } void IntrinsicCodeGeneratorARM::VisitUnsafeGet(HInvoke* invoke) { diff --git a/compiler/optimizing/licm_test.cc b/compiler/optimizing/licm_test.cc index aa60fd646d..2b63ec8971 100644 --- a/compiler/optimizing/licm_test.cc +++ b/compiler/optimizing/licm_test.cc @@ -65,7 +65,8 @@ class LICMTest : public CommonCompilerTest { // Provide boiler-plate instructions. parameter_ = new (&allocator_) HParameterValue(graph_->GetDexFile(), 0, 0, Primitive::kPrimNot); entry_->AddInstruction(parameter_); - constant_ = graph_->GetIntConstant(42); + int_constant_ = graph_->GetIntConstant(42); + float_constant_ = graph_->GetFloatConstant(42.0f); loop_preheader_->AddInstruction(new (&allocator_) HGoto()); loop_header_->AddInstruction(new (&allocator_) HIf(parameter_)); loop_body_->AddInstruction(new (&allocator_) HGoto()); @@ -95,7 +96,8 @@ class LICMTest : public CommonCompilerTest { HBasicBlock* exit_; HInstruction* parameter_; // "this" - HInstruction* constant_; + HInstruction* int_constant_; + HInstruction* float_constant_; }; // @@ -118,7 +120,7 @@ TEST_F(LICMTest, FieldHoisting) { 0); loop_body_->InsertInstructionBefore(get_field, loop_body_->GetLastInstruction()); HInstruction* set_field = new (&allocator_) HInstanceFieldSet( - parameter_, constant_, Primitive::kPrimInt, MemberOffset(20), + parameter_, int_constant_, Primitive::kPrimInt, MemberOffset(20), false, kUnknownFieldIndex, kUnknownClassDefIndex, graph_->GetDexFile(), dex_cache, 0); loop_body_->InsertInstructionBefore(set_field, loop_body_->GetLastInstruction()); @@ -167,11 +169,13 @@ TEST_F(LICMTest, ArrayHoisting) { BuildLoop(); // Populate the loop with instructions: set/get array with different types. + // ArrayGet is typed as kPrimByte and ArraySet given a float value in order to + // avoid SsaBuilder's typing of ambiguous array operations from reference type info. 
HInstruction* get_array = new (&allocator_) HArrayGet( - parameter_, constant_, Primitive::kPrimByte, 0); + parameter_, int_constant_, Primitive::kPrimByte, 0); loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction()); HInstruction* set_array = new (&allocator_) HArraySet( - parameter_, constant_, constant_, Primitive::kPrimShort, 0); + parameter_, int_constant_, float_constant_, Primitive::kPrimShort, 0); loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction()); EXPECT_EQ(get_array->GetBlock(), loop_body_); @@ -185,11 +189,13 @@ TEST_F(LICMTest, NoArrayHoisting) { BuildLoop(); // Populate the loop with instructions: set/get array with same types. + // ArrayGet is typed as kPrimByte and ArraySet given a float value in order to + // avoid SsaBuilder's typing of ambiguous array operations from reference type info. HInstruction* get_array = new (&allocator_) HArrayGet( - parameter_, constant_, Primitive::kPrimByte, 0); + parameter_, int_constant_, Primitive::kPrimByte, 0); loop_body_->InsertInstructionBefore(get_array, loop_body_->GetLastInstruction()); HInstruction* set_array = new (&allocator_) HArraySet( - parameter_, get_array, constant_, Primitive::kPrimByte, 0); + parameter_, get_array, float_constant_, Primitive::kPrimByte, 0); loop_body_->InsertInstructionBefore(set_array, loop_body_->GetLastInstruction()); EXPECT_EQ(get_array->GetBlock(), loop_body_); diff --git a/compiler/optimizing/load_store_elimination.cc b/compiler/optimizing/load_store_elimination.cc index 727f2bb717..2b313f6b81 100644 --- a/compiler/optimizing/load_store_elimination.cc +++ b/compiler/optimizing/load_store_elimination.cc @@ -678,16 +678,6 @@ class LSEVisitor : public HGraphVisitor { } } - static bool IsIntFloatAlias(Primitive::Type type1, Primitive::Type type2) { - return (type1 == Primitive::kPrimFloat && type2 == Primitive::kPrimInt) || - (type2 == Primitive::kPrimFloat && type1 == Primitive::kPrimInt); - } - - static bool IsLongDoubleAlias(Primitive::Type type1, Primitive::Type type2) { - return (type1 == Primitive::kPrimDouble && type2 == Primitive::kPrimLong) || - (type2 == Primitive::kPrimDouble && type1 == Primitive::kPrimLong); - } - void VisitGetLocation(HInstruction* instruction, HInstruction* ref, size_t offset, @@ -716,22 +706,14 @@ class LSEVisitor : public HGraphVisitor { // Get the real heap value of the store. heap_value = store->InputAt(1); } - if ((heap_value != kUnknownHeapValue) && - // Keep the load due to possible I/F, J/D array aliasing. - // See b/22538329 for details. - !IsIntFloatAlias(heap_value->GetType(), instruction->GetType()) && - !IsLongDoubleAlias(heap_value->GetType(), instruction->GetType())) { - removed_loads_.push_back(instruction); - substitute_instructions_for_loads_.push_back(heap_value); - TryRemovingNullCheck(instruction); - return; - } - - // Load isn't eliminated. if (heap_value == kUnknownHeapValue) { - // Put the load as the value into the HeapLocation. + // Load isn't eliminated. Put the load as the value into the HeapLocation. // This acts like GVN but with better aliasing analysis. 
heap_values[idx] = instruction; + } else { + removed_loads_.push_back(instruction); + substitute_instructions_for_loads_.push_back(heap_value); + TryRemovingNullCheck(instruction); } } diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index fc12224783..6d4275d8a6 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -2060,6 +2060,16 @@ void HGraph::TransformLoopHeaderForBCE(HBasicBlock* header) { new_pre_header->SetTryCatchInformation(try_catch_info); } +static void CheckAgainstUpperBound(ReferenceTypeInfo rti, ReferenceTypeInfo upper_bound_rti) + SHARED_REQUIRES(Locks::mutator_lock_) { + if (rti.IsValid()) { + DCHECK(upper_bound_rti.IsSupertypeOf(rti)) + << " upper_bound_rti: " << upper_bound_rti + << " rti: " << rti; + DCHECK(!upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes() || rti.IsExact()); + } +} + void HInstruction::SetReferenceTypeInfo(ReferenceTypeInfo rti) { if (kIsDebugBuild) { DCHECK_EQ(GetType(), Primitive::kPrimNot); @@ -2068,16 +2078,23 @@ void HInstruction::SetReferenceTypeInfo(ReferenceTypeInfo rti) { if (IsBoundType()) { // Having the test here spares us from making the method virtual just for // the sake of a DCHECK. - ReferenceTypeInfo upper_bound_rti = AsBoundType()->GetUpperBound(); - DCHECK(upper_bound_rti.IsSupertypeOf(rti)) - << " upper_bound_rti: " << upper_bound_rti - << " rti: " << rti; - DCHECK(!upper_bound_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes() || rti.IsExact()); + CheckAgainstUpperBound(rti, AsBoundType()->GetUpperBound()); } } reference_type_info_ = rti; } +void HBoundType::SetUpperBound(const ReferenceTypeInfo& upper_bound, bool can_be_null) { + if (kIsDebugBuild) { + ScopedObjectAccess soa(Thread::Current()); + DCHECK(upper_bound.IsValid()); + DCHECK(!upper_bound_.IsValid()) << "Upper bound should only be set once."; + CheckAgainstUpperBound(GetReferenceTypeInfo(), upper_bound); + } + upper_bound_ = upper_bound; + upper_can_be_null_ = can_be_null; +} + ReferenceTypeInfo::ReferenceTypeInfo() : type_handle_(TypeHandle()), is_exact_(false) {} ReferenceTypeInfo::ReferenceTypeInfo(TypeHandle type_handle, bool is_exact) @@ -2271,4 +2288,19 @@ HInstruction* HGraph::InsertOppositeCondition(HInstruction* cond, HInstruction* } } +std::ostream& operator<<(std::ostream& os, const MoveOperands& rhs) { + os << "[" + << " source=" << rhs.GetSource() + << " destination=" << rhs.GetDestination() + << " type=" << rhs.GetType() + << " instruction="; + if (rhs.GetInstruction() != nullptr) { + os << rhs.GetInstruction()->DebugName() << ' ' << rhs.GetInstruction()->GetId(); + } else { + os << "null"; + } + os << " ]"; + return os; +} + } // namespace art diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 5b072cf71c..c06d164523 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -101,7 +101,7 @@ enum IfCondition { enum BuildSsaResult { kBuildSsaFailNonNaturalLoop, kBuildSsaFailThrowCatchLoop, - kBuildSsaFailAmbiguousArrayGet, + kBuildSsaFailAmbiguousArrayOp, kBuildSsaSuccess, }; @@ -240,7 +240,7 @@ class ReferenceTypeInfo : ValueObject { // Returns true if the type information provide the same amount of details. // Note that it does not mean that the instructions have the same actual type // (because the type can be the result of a merge). 
- bool IsEqual(ReferenceTypeInfo rti) SHARED_REQUIRES(Locks::mutator_lock_) { + bool IsEqual(ReferenceTypeInfo rti) const SHARED_REQUIRES(Locks::mutator_lock_) { if (!IsValid() && !rti.IsValid()) { // Invalid types are equal. return true; @@ -5431,24 +5431,19 @@ class HInstanceOf : public HExpression<2> { class HBoundType : public HExpression<1> { public: - // Constructs an HBoundType with the given upper_bound. - // Ensures that the upper_bound is valid. - HBoundType(HInstruction* input, - ReferenceTypeInfo upper_bound, - bool upper_can_be_null, - uint32_t dex_pc = kNoDexPc) + HBoundType(HInstruction* input, uint32_t dex_pc = kNoDexPc) : HExpression(Primitive::kPrimNot, SideEffects::None(), dex_pc), - upper_bound_(upper_bound), - upper_can_be_null_(upper_can_be_null), - can_be_null_(upper_can_be_null) { + upper_bound_(ReferenceTypeInfo::CreateInvalid()), + upper_can_be_null_(true), + can_be_null_(true) { DCHECK_EQ(input->GetType(), Primitive::kPrimNot); SetRawInputAt(0, input); - SetReferenceTypeInfo(upper_bound_); } - // GetUpper* should only be used in reference type propagation. + // {Get,Set}Upper* should only be used in reference type propagation. const ReferenceTypeInfo& GetUpperBound() const { return upper_bound_; } bool GetUpperCanBeNull() const { return upper_can_be_null_; } + void SetUpperBound(const ReferenceTypeInfo& upper_bound, bool can_be_null); void SetCanBeNull(bool can_be_null) { DCHECK(upper_can_be_null_ || !can_be_null); @@ -5466,10 +5461,10 @@ class HBoundType : public HExpression<1> { // if (x instanceof ClassX) { // // uper_bound_ will be ClassX // } - const ReferenceTypeInfo upper_bound_; + ReferenceTypeInfo upper_bound_; // Represents the top constraint that can_be_null_ cannot exceed (i.e. if this // is false then can_be_null_ cannot be true). - const bool upper_can_be_null_; + bool upper_can_be_null_; bool can_be_null_; DISALLOW_COPY_AND_ASSIGN(HBoundType); @@ -5618,8 +5613,8 @@ class MoveOperands : public ArenaObject<kArenaAllocMoveOperands> { } bool IsPending() const { - DCHECK(!source_.IsInvalid() || destination_.IsInvalid()); - return destination_.IsInvalid() && !source_.IsInvalid(); + DCHECK(source_.IsValid() || destination_.IsInvalid()); + return destination_.IsInvalid() && source_.IsValid(); } // True if this blocks a move from the given location. 
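The refactored IsPending() and the MoveOperands printer added in the next hunk make parallel-move cycles much easier to diagnose. A minimal model of the invariant (a pending move has had its destination cleared but must still carry a valid source) with a simplified index-based Location standing in for ART's; all names here are assumptions of the sketch:

#include <cassert>
#include <iostream>

struct Location {
  int index = -1;  // -1 models ART's "invalid" location.
  bool IsValid() const { return index >= 0; }
  bool IsInvalid() const { return !IsValid(); }
};

struct MoveOperands {
  Location source;
  Location destination;
  // A move becomes pending while the resolver walks a cycle: destination is
  // cleared first, so a valid destination implies the move is not pending.
  bool IsPending() const {
    assert(source.IsValid() || destination.IsInvalid());
    return destination.IsInvalid() && source.IsValid();
  }
};

std::ostream& operator<<(std::ostream& os, const MoveOperands& m) {
  return os << "[ source=" << m.source.index
            << " destination=" << m.destination.index << " ]";
}

With such a printer, the strengthened check in PerformMove can report both conflicting moves when the cycle invariant breaks, in the spirit of the diff's DCHECK(other_move->IsPending()) << "move=" << *move << " other_move=" << *other_move.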
@@ -5663,6 +5658,8 @@ class MoveOperands : public ArenaObject<kArenaAllocMoveOperands> { HInstruction* instruction_; }; +std::ostream& operator<<(std::ostream& os, const MoveOperands& rhs); + static constexpr size_t kDefaultNumberOfMoves = 4; class HParallelMove : public HTemplateInstruction<0> { diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 3de870e95e..3eb72744ee 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -426,8 +426,18 @@ static void MaybeRunInliner(HGraph* graph, if (!should_inline) { return; } + size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_; HInliner* inliner = new (graph->GetArena()) HInliner( - graph, graph, codegen, dex_compilation_unit, dex_compilation_unit, driver, handles, stats); + graph, + graph, + codegen, + dex_compilation_unit, + dex_compilation_unit, + driver, + handles, + stats, + number_of_dex_registers, + /* depth */ 0); HOptimization* optimizations[] = { inliner }; RunOptimizations(optimizations, arraysize(optimizations), pass_observer); @@ -776,8 +786,8 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena, case kBuildSsaFailThrowCatchLoop: MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop); break; - case kBuildSsaFailAmbiguousArrayGet: - MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayGet); + case kBuildSsaFailAmbiguousArrayOp: + MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp); break; case kBuildSsaSuccess: UNREACHABLE(); diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h index 4713514bb2..bca1632e31 100644 --- a/compiler/optimizing/optimizing_compiler_stats.h +++ b/compiler/optimizing/optimizing_compiler_stats.h @@ -40,7 +40,7 @@ enum MethodCompilationStat { kNotCompiledBranchOutsideMethodCode, kNotCompiledNonNaturalLoop, kNotCompiledThrowCatchLoop, - kNotCompiledAmbiguousArrayGet, + kNotCompiledAmbiguousArrayOp, kNotCompiledHugeMethod, kNotCompiledLargeMethodNoBranches, kNotCompiledMalformedOpcode, @@ -108,7 +108,7 @@ class OptimizingCompilerStats { case kNotCompiledBranchOutsideMethodCode: name = "NotCompiledBranchOutsideMethodCode"; break; case kNotCompiledNonNaturalLoop : name = "NotCompiledNonNaturalLoop"; break; case kNotCompiledThrowCatchLoop : name = "NotCompiledThrowCatchLoop"; break; - case kNotCompiledAmbiguousArrayGet : name = "NotCompiledAmbiguousArrayGet"; break; + case kNotCompiledAmbiguousArrayOp : name = "NotCompiledAmbiguousArrayOp"; break; case kNotCompiledHugeMethod : name = "NotCompiledHugeMethod"; break; case kNotCompiledLargeMethodNoBranches : name = "NotCompiledLargeMethodNoBranches"; break; case kNotCompiledMalformedOpcode : name = "NotCompiledMalformedOpcode"; break; diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc index 176c50ce21..9d136f3ae6 100644 --- a/compiler/optimizing/parallel_move_resolver.cc +++ b/compiler/optimizing/parallel_move_resolver.cc @@ -13,7 +13,6 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -#include <iostream> #include "parallel_move_resolver.h" @@ -172,7 +171,7 @@ MoveOperands* ParallelMoveResolverWithSwap::PerformMove(size_t index) { i = -1; } else if (required_swap != nullptr) { // A move is required to swap. We walk back the cycle to find the - // move by just returning from this `PerforrmMove`. 
+ // move by just returning from this `PerformMove`. moves_[index]->ClearPending(destination); return required_swap; } @@ -201,7 +200,7 @@ MoveOperands* ParallelMoveResolverWithSwap::PerformMove(size_t index) { } else { for (MoveOperands* other_move : moves_) { if (other_move->Blocks(destination)) { - DCHECK(other_move->IsPending()); + DCHECK(other_move->IsPending()) << "move=" << *move << " other_move=" << *other_move; if (!move->Is64BitMove() && other_move->Is64BitMove()) { // We swap 64bits moves before swapping 32bits moves. Go back from the // cycle by returning the move that must be swapped. diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc index d1770b75ab..63ef600756 100644 --- a/compiler/optimizing/prepare_for_register_allocation.cc +++ b/compiler/optimizing/prepare_for_register_allocation.cc @@ -96,7 +96,7 @@ void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) { if (can_merge_with_load_class && !load_class->HasUses()) { load_class->GetBlock()->RemoveInstruction(load_class); } - } else if (can_merge_with_load_class) { + } else if (can_merge_with_load_class && !load_class->NeedsAccessCheck()) { // Pass the initialization duty to the `HLoadClass` instruction, // and remove the instruction from the graph. load_class->SetMustGenerateClinitCheck(true); diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index 94a297c9e6..1c25e4824c 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -56,6 +56,7 @@ class RTPVisitor : public HGraphDelegateVisitor { void VisitInvoke(HInvoke* instr) OVERRIDE; void VisitArrayGet(HArrayGet* instr) OVERRIDE; void VisitCheckCast(HCheckCast* instr) OVERRIDE; + void VisitBoundType(HBoundType* instr) OVERRIDE; void VisitNullCheck(HNullCheck* instr) OVERRIDE; void VisitFakeString(HFakeString* instr) OVERRIDE; void UpdateReferenceTypeInfo(HInstruction* instr, @@ -124,91 +125,6 @@ void ReferenceTypePropagation::ValidateTypes() { } } -static void CheckHasNoTypedInputs(HInstruction* root_instr) { - ArenaAllocatorAdapter<void> adapter = - root_instr->GetBlock()->GetGraph()->GetArena()->Adapter(kArenaAllocReferenceTypePropagation); - - ArenaVector<HPhi*> visited_phis(adapter); - ArenaVector<HInstruction*> worklist(adapter); - worklist.push_back(root_instr); - - while (!worklist.empty()) { - HInstruction* instr = worklist.back(); - worklist.pop_back(); - - if (instr->IsPhi() || instr->IsBoundType() || instr->IsNullCheck()) { - // Expect that both `root_instr` and its inputs have invalid RTI. - ScopedObjectAccess soa(Thread::Current()); - DCHECK(!instr->GetReferenceTypeInfo().IsValid()) << "Instruction should not have valid RTI."; - - // Insert all unvisited inputs to the worklist. - for (HInputIterator it(instr); !it.Done(); it.Advance()) { - HInstruction* input = it.Current(); - if (input->IsPhi()) { - if (ContainsElement(visited_phis, input->AsPhi())) { - continue; - } else { - visited_phis.push_back(input->AsPhi()); - } - } - worklist.push_back(input); - } - } else if (instr->IsNullConstant()) { - // The only input of `root_instr` allowed to have valid RTI because it is ignored. 
- } else { - LOG(FATAL) << "Unexpected input " << instr->DebugName() << instr->GetId() << " with RTI " - << instr->GetReferenceTypeInfo(); - UNREACHABLE(); - } - } -} - -template<typename Functor> -static void ForEachUntypedInstruction(HGraph* graph, Functor fn) { - ScopedObjectAccess soa(Thread::Current()); - for (HReversePostOrderIterator block_it(*graph); !block_it.Done(); block_it.Advance()) { - for (HInstructionIterator it(block_it.Current()->GetPhis()); !it.Done(); it.Advance()) { - HPhi* phi = it.Current()->AsPhi(); - // Note that the graph may contain dead phis when run from the SsaBuilder. - // Skip those as they might have a type conflict and will be removed anyway. - if (phi->IsLive() && - phi->GetType() == Primitive::kPrimNot && - !phi->GetReferenceTypeInfo().IsValid()) { - fn(phi); - } - } - for (HInstructionIterator it(block_it.Current()->GetInstructions()); !it.Done(); it.Advance()) { - HInstruction* instr = it.Current(); - if (instr->GetType() == Primitive::kPrimNot && !instr->GetReferenceTypeInfo().IsValid()) { - fn(instr); - } - } - } -} - -void ReferenceTypePropagation::SetUntypedInstructionsToObject() { - // In some cases, the fix-point iteration will leave kPrimNot instructions with - // invalid RTI because bytecode does not provide enough typing information. - // Set the RTI of such instructions to Object. - // Example: - // MyClass a = null, b = null; - // while (a == null) { - // if (cond) { a = b; } else { b = a; } - // } - - if (kIsDebugBuild) { - // Test that if we are going to set RTI from invalid to Object, that - // instruction did not have any typed instructions in its def-use chain - // and therefore its type could not be inferred. - ForEachUntypedInstruction(graph_, [](HInstruction* instr) { CheckHasNoTypedInputs(instr); }); - } - - ReferenceTypeInfo obj_rti = ReferenceTypeInfo::Create(object_class_handle_, /* is_exact */ false); - ForEachUntypedInstruction(graph_, [obj_rti](HInstruction* instr) { - instr->SetReferenceTypeInfo(obj_rti); - }); -} - void ReferenceTypePropagation::Run() { // To properly propagate type info we need to visit in the dominator-based order. // Reverse post order guarantees a node's dominators are visited first. @@ -218,7 +134,6 @@ void ReferenceTypePropagation::Run() { } ProcessWorklist(); - SetUntypedInstructionsToObject(); ValidateTypes(); } @@ -246,34 +161,6 @@ void ReferenceTypePropagation::VisitBasicBlock(HBasicBlock* block) { BoundTypeForIfInstanceOf(block); } -// Create a bound type for the given object narrowing the type as much as possible. -// The BoundType upper values for the super type and can_be_null will be taken from -// load_class.GetLoadedClassRTI() and upper_can_be_null. -static HBoundType* CreateBoundType(ArenaAllocator* arena, - HInstruction* obj, - HLoadClass* load_class, - bool upper_can_be_null) - SHARED_REQUIRES(Locks::mutator_lock_) { - ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo(); - ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI(); - DCHECK(class_rti.IsValid()); - HBoundType* bound_type = new (arena) HBoundType(obj, class_rti, upper_can_be_null); - // Narrow the type as much as possible. 
- if (class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) { - bound_type->SetReferenceTypeInfo( - ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ true)); - } else if (obj_rti.IsValid() && class_rti.IsSupertypeOf(obj_rti)) { - bound_type->SetReferenceTypeInfo(obj_rti); - } else { - bound_type->SetReferenceTypeInfo( - ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false)); - } - if (upper_can_be_null) { - bound_type->SetCanBeNull(obj->CanBeNull()); - } - return bound_type; -} - // Check if we should create a bound type for the given object at the specified // position. Because of inlining and the fact we run RTP more than once and we // might have a HBoundType already. If we do, we should not create a new one. @@ -359,8 +246,8 @@ void ReferenceTypePropagation::BoundTypeForIfNotNull(HBasicBlock* block) { ReferenceTypeInfo object_rti = ReferenceTypeInfo::Create( object_class_handle_, /* is_exact */ true); if (ShouldCreateBoundType(insert_point, obj, object_rti, nullptr, notNullBlock)) { - bound_type = new (graph_->GetArena()) HBoundType( - obj, object_rti, /* bound_can_be_null */ false); + bound_type = new (graph_->GetArena()) HBoundType(obj); + bound_type->SetUpperBound(object_rti, /* bound_can_be_null */ false); if (obj->GetReferenceTypeInfo().IsValid()) { bound_type->SetReferenceTypeInfo(obj->GetReferenceTypeInfo()); } @@ -494,11 +381,8 @@ void ReferenceTypePropagation::BoundTypeForIfInstanceOf(HBasicBlock* block) { ScopedObjectAccess soa(Thread::Current()); HInstruction* insert_point = instanceOfTrueBlock->GetFirstInstruction(); if (ShouldCreateBoundType(insert_point, obj, class_rti, nullptr, instanceOfTrueBlock)) { - bound_type = CreateBoundType( - graph_->GetArena(), - obj, - load_class, - false /* InstanceOf ensures the object is not null. */); + bound_type = new (graph_->GetArena()) HBoundType(obj); + bound_type->SetUpperBound(class_rti, /* InstanceOf fails for null. */ false); instanceOfTrueBlock->InsertInstructionBefore(bound_type, insert_point); } else { // We already have a bound type on the position we would need to insert @@ -688,43 +572,61 @@ void RTPVisitor::VisitFakeString(HFakeString* instr) { instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(string_class_handle_, /* is_exact */ true)); } +void RTPVisitor::VisitBoundType(HBoundType* instr) { + ScopedObjectAccess soa(Thread::Current()); + + ReferenceTypeInfo class_rti = instr->GetUpperBound(); + if (class_rti.IsValid()) { + // Narrow the type as much as possible. + HInstruction* obj = instr->InputAt(0); + ReferenceTypeInfo obj_rti = obj->GetReferenceTypeInfo(); + if (class_rti.GetTypeHandle()->CannotBeAssignedFromOtherTypes()) { + instr->SetReferenceTypeInfo( + ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ true)); + } else if (obj_rti.IsValid()) { + if (class_rti.IsSupertypeOf(obj_rti)) { + // Object type is more specific. + instr->SetReferenceTypeInfo(obj_rti); + } else { + // Upper bound is more specific. + instr->SetReferenceTypeInfo( + ReferenceTypeInfo::Create(class_rti.GetTypeHandle(), /* is_exact */ false)); + } + } else { + // Object not typed yet. Leave BoundType untyped for now rather than + // assign the type conservatively. + } + instr->SetCanBeNull(obj->CanBeNull() && instr->GetUpperCanBeNull()); + } else { + // The owner of the BoundType was already visited. If the class is unresolved, + // the BoundType should have been removed from the data flow and this method + // should remove it from the graph. 
+ DCHECK(!instr->HasUses()); + instr->GetBlock()->RemoveInstruction(instr); + } +} + void RTPVisitor::VisitCheckCast(HCheckCast* check_cast) { + ScopedObjectAccess soa(Thread::Current()); + HLoadClass* load_class = check_cast->InputAt(1)->AsLoadClass(); ReferenceTypeInfo class_rti = load_class->GetLoadedClassRTI(); - { - ScopedObjectAccess soa(Thread::Current()); - if (!class_rti.IsValid()) { - // He have loaded an unresolved class. Don't bother bounding the type. - return; - } + HBoundType* bound_type = check_cast->GetNext()->AsBoundType(); + if (bound_type == nullptr || bound_type->GetUpperBound().IsValid()) { + // The next instruction is not an uninitialized BoundType. This must be + // an RTP pass after SsaBuilder and we do not need to do anything. + return; } - HInstruction* obj = check_cast->InputAt(0); - HBoundType* bound_type = nullptr; - for (HUseIterator<HInstruction*> it(obj->GetUses()); !it.Done(); it.Advance()) { - HInstruction* user = it.Current()->GetUser(); - if (check_cast->StrictlyDominates(user)) { - if (bound_type == nullptr) { - ScopedObjectAccess soa(Thread::Current()); - if (ShouldCreateBoundType(check_cast->GetNext(), obj, class_rti, check_cast, nullptr)) { - bound_type = CreateBoundType( - GetGraph()->GetArena(), - obj, - load_class, - true /* CheckCast succeeds for nulls. */); - check_cast->GetBlock()->InsertInstructionAfter(bound_type, check_cast); - } else { - // Update nullability of the existing bound type, which may not have known - // that its input was not null when it was being created. - bound_type = check_cast->GetNext()->AsBoundType(); - bound_type->SetCanBeNull(obj->CanBeNull()); - // We already have a bound type on the position we would need to insert - // the new one. The existing bound type should dominate all the users - // (dchecked) so there's no need to continue. - break; - } - } - user->ReplaceInput(bound_type, it.Current()->GetIndex()); - } + DCHECK_EQ(bound_type->InputAt(0), check_cast->InputAt(0)); + + if (class_rti.IsValid()) { + // This is the first run of RTP and class is resolved. + bound_type->SetUpperBound(class_rti, /* CheckCast succeeds for nulls. */ true); + } else { + // This is the first run of RTP and class is unresolved. Remove the binding. + // The instruction itself is removed in VisitBoundType so as to not + // invalidate HInstructionIterator. + bound_type->ReplaceWith(bound_type->InputAt(0)); } } diff --git a/compiler/optimizing/reference_type_propagation.h b/compiler/optimizing/reference_type_propagation.h index 21789e1331..5c05592726 100644 --- a/compiler/optimizing/reference_type_propagation.h +++ b/compiler/optimizing/reference_type_propagation.h @@ -57,7 +57,6 @@ class ReferenceTypePropagation : public HOptimization { SHARED_REQUIRES(Locks::mutator_lock_); void ValidateTypes(); - void SetUntypedInstructionsToObject(); StackHandleScopeCollection* handles_; diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc index 9e869e18e9..f6bab8efcb 100644 --- a/compiler/optimizing/ssa_builder.cc +++ b/compiler/optimizing/ssa_builder.cc @@ -154,7 +154,7 @@ static bool TypePhiFromInputs(HPhi* phi) { Primitive::Type input_type = HPhi::ToPhiType(input->GetType()); if (common_type == input_type) { // No change in type. - } else if (Primitive::ComponentSize(common_type) != Primitive::ComponentSize(input_type)) { + } else if (Primitive::Is64BitType(common_type) != Primitive::Is64BitType(input_type)) { // Types are of different sizes, e.g. int vs. long. Must be a conflict. 
return false; } else if (Primitive::IsIntegralType(common_type)) { @@ -317,27 +317,15 @@ static HArrayGet* CreateFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget) { return equivalent; } -// Returns true if the array input of `aget` is either of type int[] or long[]. -// Should only be called on ArrayGets with ambiguous type (int/float, long/double) -// on arrays which were typed to an array class by RTP. -static bool IsArrayGetOnIntegralArray(HArrayGet* aget) SHARED_REQUIRES(Locks::mutator_lock_) { - ReferenceTypeInfo array_type = aget->GetArray()->GetReferenceTypeInfo(); +static Primitive::Type GetPrimitiveArrayComponentType(HInstruction* array) + SHARED_REQUIRES(Locks::mutator_lock_) { + ReferenceTypeInfo array_type = array->GetReferenceTypeInfo(); DCHECK(array_type.IsPrimitiveArrayClass()); - ReferenceTypeInfo::TypeHandle array_type_handle = array_type.GetTypeHandle(); - - bool is_integral_type; - if (Primitive::Is64BitType(aget->GetType())) { - is_integral_type = array_type_handle->GetComponentType()->IsPrimitiveLong(); - DCHECK(is_integral_type || array_type_handle->GetComponentType()->IsPrimitiveDouble()); - } else { - is_integral_type = array_type_handle->GetComponentType()->IsPrimitiveInt(); - DCHECK(is_integral_type || array_type_handle->GetComponentType()->IsPrimitiveFloat()); - } - return is_integral_type; + return array_type.GetTypeHandle()->GetComponentType()->GetPrimitiveType(); } -bool SsaBuilder::FixAmbiguousArrayGets() { - if (ambiguous_agets_.empty()) { +bool SsaBuilder::FixAmbiguousArrayOps() { + if (ambiguous_agets_.empty() && ambiguous_asets_.empty()) { return true; } @@ -351,13 +339,17 @@ bool SsaBuilder::FixAmbiguousArrayGets() { ScopedObjectAccess soa(Thread::Current()); for (HArrayGet* aget_int : ambiguous_agets_) { - if (!aget_int->GetArray()->GetReferenceTypeInfo().IsPrimitiveArrayClass()) { + HInstruction* array = aget_int->GetArray(); + if (!array->GetReferenceTypeInfo().IsPrimitiveArrayClass()) { // RTP did not type the input array. Bail. return false; } HArrayGet* aget_float = FindFloatOrDoubleEquivalentOfArrayGet(aget_int); - if (IsArrayGetOnIntegralArray(aget_int)) { + Primitive::Type array_type = GetPrimitiveArrayComponentType(array); + DCHECK_EQ(Primitive::Is64BitType(aget_int->GetType()), Primitive::Is64BitType(array_type)); + + if (Primitive::IsIntOrLongType(array_type)) { if (aget_float != nullptr) { // There is a float/double equivalent. We must replace it and re-run // primitive type propagation on all dependent instructions. @@ -366,6 +358,7 @@ bool SsaBuilder::FixAmbiguousArrayGets() { AddDependentInstructionsToWorklist(aget_int, &worklist); } } else { + DCHECK(Primitive::IsFloatingPointType(array_type)); if (aget_float == nullptr) { // This is a float/double ArrayGet but there were no typed uses which // would create the typed equivalent. Create it now. @@ -379,11 +372,47 @@ bool SsaBuilder::FixAmbiguousArrayGets() { AddDependentInstructionsToWorklist(aget_float, &worklist); } } - } - // Set a flag stating that types of ArrayGets have been resolved. This is used - // by GetFloatOrDoubleEquivalentOfArrayGet to report conflict. - agets_fixed_ = true; + // Set a flag stating that types of ArrayGets have been resolved. Requesting + // equivalent of the wrong type with GetFloatOrDoubleEquivalentOfArrayGet + // will fail from now on. + agets_fixed_ = true; + + for (HArraySet* aset : ambiguous_asets_) { + HInstruction* array = aset->GetArray(); + if (!array->GetReferenceTypeInfo().IsPrimitiveArrayClass()) { + // RTP did not type the input array. Bail. 
+ return false; + } + + HInstruction* value = aset->GetValue(); + Primitive::Type value_type = value->GetType(); + Primitive::Type array_type = GetPrimitiveArrayComponentType(array); + DCHECK_EQ(Primitive::Is64BitType(value_type), Primitive::Is64BitType(array_type)); + + if (Primitive::IsFloatingPointType(array_type)) { + if (!Primitive::IsFloatingPointType(value_type)) { + DCHECK(Primitive::IsIntegralType(value_type)); + // Array elements are floating-point but the value has not been replaced + // with its floating-point equivalent. The replacement must always + // succeed in code validated by the verifier. + HInstruction* equivalent = GetFloatOrDoubleEquivalent(value, array_type); + DCHECK(equivalent != nullptr); + aset->ReplaceInput(equivalent, /* input_index */ 2); + if (equivalent->IsPhi()) { + // Returned equivalent is a phi which may not have had its inputs + // replaced yet. We need to run primitive type propagation on it. + worklist.push_back(equivalent->AsPhi()); + } + } + } else { + // Array elements are integral and the value assigned to it initially + // was integral too. Nothing to do. + DCHECK(Primitive::IsIntegralType(array_type)); + DCHECK(Primitive::IsIntegralType(value_type)); + } + } + } if (!worklist.empty()) { ProcessPrimitiveTypePropagationWorklist(&worklist); @@ -429,10 +458,11 @@ BuildSsaResult SsaBuilder::BuildSsa() { ReferenceTypePropagation(GetGraph(), handles_).Run(); // 7) Step 1) duplicated ArrayGet instructions with ambiguous type (int/float - // or long/double). Now that RTP computed the type of the array input, the - // ambiguity can be resolved and the correct equivalent kept. - if (!FixAmbiguousArrayGets()) { - return kBuildSsaFailAmbiguousArrayGet; + // or long/double) and marked ArraySets with ambiguous input type. Now that RTP + // computed the type of the array input, the ambiguity can be resolved and the + // correct equivalents kept. + if (!FixAmbiguousArrayOps()) { + return kBuildSsaFailAmbiguousArrayOp; } // 8) Mark dead phis. This will mark phis which are not used by instructions @@ -702,7 +732,7 @@ HArrayGet* SsaBuilder::GetFloatOrDoubleEquivalentOfArrayGet(HArrayGet* aget) { // int/long. Requesting a float/double equivalent should lead to a conflict. 
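For context on why these array operations are ambiguous at all: a dex aget/aput only moves 32 (or 64) bits, and the same bit pattern is an int or a float depending solely on the array's component type, which becomes known only once RTP has typed the array. A small self-contained illustration of that reinterpretation:

#include <cstdint>
#include <cstring>

// A dex-level aget just copies bits; 0x3f800000 is the integer 1065353216
// and the float 1.0f, so the compiler must pick the int or float
// "equivalent" of the load once the array's component type is resolved.
float AsFloat(int32_t bits) {
  float f;
  std::memcpy(&f, &bits, sizeof(f));  // Bit-for-bit reinterpretation.
  return f;
}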
if (kIsDebugBuild) { ScopedObjectAccess soa(Thread::Current()); - DCHECK(IsArrayGetOnIntegralArray(aget)); + DCHECK(Primitive::IsIntOrLongType(GetPrimitiveArrayComponentType(aget->GetArray()))); } return nullptr; } else { @@ -847,4 +877,12 @@ void SsaBuilder::VisitArrayGet(HArrayGet* aget) { VisitInstruction(aget); } +void SsaBuilder::VisitArraySet(HArraySet* aset) { + Primitive::Type type = aset->GetValue()->GetType(); + if (Primitive::IsIntOrLongType(type)) { + ambiguous_asets_.push_back(aset); + } + VisitInstruction(aset); +} + } // namespace art diff --git a/compiler/optimizing/ssa_builder.h b/compiler/optimizing/ssa_builder.h index ed6f5cab51..0fcc3a1306 100644 --- a/compiler/optimizing/ssa_builder.h +++ b/compiler/optimizing/ssa_builder.h @@ -56,6 +56,7 @@ class SsaBuilder : public HGraphVisitor { current_locals_(nullptr), loop_headers_(graph->GetArena()->Adapter(kArenaAllocSsaBuilder)), ambiguous_agets_(graph->GetArena()->Adapter(kArenaAllocSsaBuilder)), + ambiguous_asets_(graph->GetArena()->Adapter(kArenaAllocSsaBuilder)), locals_for_(graph->GetBlocks().size(), ArenaVector<HInstruction*>(graph->GetArena()->Adapter(kArenaAllocSsaBuilder)), graph->GetArena()->Adapter(kArenaAllocSsaBuilder)) { @@ -75,6 +76,7 @@ class SsaBuilder : public HGraphVisitor { void VisitInstruction(HInstruction* instruction); void VisitTemporary(HTemporary* instruction); void VisitArrayGet(HArrayGet* aget); + void VisitArraySet(HArraySet* aset); static constexpr const char* kSsaBuilderPassName = "ssa_builder"; @@ -85,10 +87,10 @@ class SsaBuilder : public HGraphVisitor { void EquivalentPhisCleanup(); void RunPrimitiveTypePropagation(); - // Attempts to resolve types of aget and aget-wide instructions from reference - // type information on the input array. Returns false if the type of the array - // is unknown. - bool FixAmbiguousArrayGets(); + // Attempts to resolve types of aget(-wide) instructions and type values passed + // to aput(-wide) instructions from reference type information on the array + // input. Returns false if the type of an array is unknown. + bool FixAmbiguousArrayOps(); bool TypeInputsOfPhi(HPhi* phi, ArenaVector<HPhi*>* worklist); bool UpdatePrimitiveType(HPhi* phi, ArenaVector<HPhi*>* worklist); @@ -115,6 +117,7 @@ class SsaBuilder : public HGraphVisitor { ArenaVector<HBasicBlock*> loop_headers_; ArenaVector<HArrayGet*> ambiguous_agets_; + ArenaVector<HArraySet*> ambiguous_asets_; // HEnvironment for each block. ArenaVector<ArenaVector<HInstruction*>> locals_for_; diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc index 63aba88c2b..2eef307295 100644 --- a/compiler/optimizing/ssa_phi_elimination.cc +++ b/compiler/optimizing/ssa_phi_elimination.cc @@ -17,6 +17,7 @@ #include "ssa_phi_elimination.h" #include "base/arena_containers.h" +#include "base/bit_vector-inl.h" namespace art { @@ -129,6 +130,9 @@ void SsaRedundantPhiElimination::Run() { } } + ArenaSet<uint32_t> visited_phis_in_cycle(graph_->GetArena()->Adapter()); + ArenaVector<HPhi*> cycle_worklist(graph_->GetArena()->Adapter()); + while (!worklist_.empty()) { HPhi* phi = worklist_.back(); worklist_.pop_back(); @@ -143,46 +147,92 @@ void SsaRedundantPhiElimination::Run() { continue; } - // Find if the inputs of the phi are the same instruction. - HInstruction* candidate = phi->InputAt(0); - // A loop phi cannot have itself as the first phi. 
Note that this - // check relies on our simplification pass ensuring the pre-header - // block is first in the list of predecessors of the loop header. - DCHECK(!phi->IsLoopHeaderPhi() || phi->GetBlock()->IsLoopPreHeaderFirstPredecessor()); - DCHECK_NE(phi, candidate); - - for (size_t i = 1; i < phi->InputCount(); ++i) { - HInstruction* input = phi->InputAt(i); - // For a loop phi, if the input is the phi, the phi is still candidate for - // elimination. - if (input != candidate && input != phi) { + HInstruction* candidate = nullptr; + visited_phis_in_cycle.clear(); + cycle_worklist.clear(); + + cycle_worklist.push_back(phi); + visited_phis_in_cycle.insert(phi->GetId()); + bool catch_phi_in_cycle = phi->IsCatchPhi(); + + // First do a simple loop over inputs and check if they are all the same. + for (size_t j = 0; j < phi->InputCount(); ++j) { + HInstruction* input = phi->InputAt(j); + if (input == phi) { + continue; + } else if (candidate == nullptr) { + candidate = input; + } else if (candidate != input) { candidate = nullptr; break; } } - // If the inputs are not the same, continue. + // If we haven't found a candidate, check for a phi cycle. Note that we need to detect + // such cycles to avoid having reference and non-reference equivalents. We check this + // invariant in the graph checker. if (candidate == nullptr) { - continue; + // We iterate over the array as long as it grows. + for (size_t i = 0; i < cycle_worklist.size(); ++i) { + HPhi* current = cycle_worklist[i]; + DCHECK(!current->IsLoopHeaderPhi() || + current->GetBlock()->IsLoopPreHeaderFirstPredecessor()); + + for (size_t j = 0; j < current->InputCount(); ++j) { + HInstruction* input = current->InputAt(j); + if (input == current) { + continue; + } else if (input->IsPhi()) { + if (!ContainsElement(visited_phis_in_cycle, input->GetId())) { + cycle_worklist.push_back(input->AsPhi()); + visited_phis_in_cycle.insert(input->GetId()); + catch_phi_in_cycle |= input->AsPhi()->IsCatchPhi(); + } else { + // Already visited, nothing to do. + } + } else if (candidate == nullptr) { + candidate = input; + } else if (candidate != input) { + candidate = nullptr; + // Clear the cycle worklist to break out of the outer loop. + cycle_worklist.clear(); + break; + } + } + } } - // The candidate may not dominate a phi in a catch block. - if (phi->IsCatchPhi() && !candidate->StrictlyDominates(phi)) { + if (candidate == nullptr) { continue; } - // Because we're updating the users of this phi, we may have new candidates - // for elimination. Add phis that use this phi to the worklist. - for (HUseIterator<HInstruction*> it(phi->GetUses()); !it.Done(); it.Advance()) { - HUseListNode<HInstruction*>* current = it.Current(); - HInstruction* user = current->GetUser(); - if (user->IsPhi()) { - worklist_.push_back(user->AsPhi()); + for (HPhi* current : cycle_worklist) { + // The candidate may not dominate a phi in a catch block: there may be non-throwing + // instructions at the beginning of a try range, that may be the first input of + // catch phis. + // TODO(dbrazdil): Remove this situation by moving those non-throwing instructions + // before the try entry. + if (catch_phi_in_cycle) { + if (!candidate->StrictlyDominates(current)) { + continue; + } + } else { + DCHECK(candidate->StrictlyDominates(current)); + } + + // Because we're updating the users of this phi, we may have new candidates + // for elimination. Add phis that use this phi to the worklist. 
+ for (HUseIterator<HInstruction*> it(current->GetUses()); !it.Done(); it.Advance()) { + HUseListNode<HInstruction*>* use = it.Current(); + HInstruction* user = use->GetUser(); + if (user->IsPhi() && !ContainsElement(visited_phis_in_cycle, user->GetId())) { + worklist_.push_back(user->AsPhi()); + } } + DCHECK(candidate->StrictlyDominates(current)); + current->ReplaceWith(candidate); + current->GetBlock()->RemovePhi(current); } - - phi->ReplaceWith(candidate); - phi->GetBlock()->RemovePhi(phi); } } diff --git a/compiler/profile_assistant.cc b/compiler/profile_assistant.cc new file mode 100644 index 0000000000..81f2a5692d --- /dev/null +++ b/compiler/profile_assistant.cc @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "profile_assistant.h" + +namespace art { + +// Minimum number of new methods that profiles must contain to enable recompilation. +static constexpr const uint32_t kMinNewMethodsForCompilation = 10; + +bool ProfileAssistant::ProcessProfiles( + const std::vector<std::string>& profile_files, + const std::vector<std::string>& reference_profile_files, + /*out*/ ProfileCompilationInfo** profile_compilation_info) { + DCHECK(!profile_files.empty()); + DCHECK(reference_profile_files.empty() || + (profile_files.size() == reference_profile_files.size())); + + std::vector<ProfileCompilationInfo> new_info(profile_files.size()); + bool should_compile = false; + // Read the main profile files. + for (size_t i = 0; i < profile_files.size(); i++) { + if (!new_info[i].Load(profile_files[i])) { + LOG(WARNING) << "Could not load profile file: " << profile_files[i]; + return false; + } + // Do we have enough new profiled methods that will make the compilation worthwhile? + should_compile |= (new_info[i].GetNumberOfMethods() > kMinNewMethodsForCompilation); + } + if (!should_compile) { + *profile_compilation_info = nullptr; + return true; + } + + std::unique_ptr<ProfileCompilationInfo> result(new ProfileCompilationInfo()); + for (size_t i = 0; i < new_info.size(); i++) { + // Merge all data into a single object. + result->Load(new_info[i]); + // If we have any reference profile information merge their information with + // the current profiles and save them back to disk. 
+ if (!reference_profile_files.empty()) { + if (!new_info[i].Load(reference_profile_files[i])) { + LOG(WARNING) << "Could not load reference profile file: " << reference_profile_files[i]; + return false; + } + if (!new_info[i].Save(reference_profile_files[i])) { + LOG(WARNING) << "Could not save reference profile file: " << reference_profile_files[i]; + return false; + } + } + } + *profile_compilation_info = result.release(); + return true; +} + +} // namespace art diff --git a/compiler/profile_assistant.h b/compiler/profile_assistant.h new file mode 100644 index 0000000000..088c8bd1c7 --- /dev/null +++ b/compiler/profile_assistant.h @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_COMPILER_PROFILE_ASSISTANT_H_ +#define ART_COMPILER_PROFILE_ASSISTANT_H_ + +#include <string> +#include <vector> + +#include "jit/offline_profiling_info.h" + +namespace art { + +class ProfileAssistant { + public: + // Process the profile information present in the given files. Returns true + // if the analysis completed successfully (i.e. no errors during reading, + // merging or writing of profile files). + // + // If the returned value is true and there is a significant difference between + // profile_files and reference_profile_files: + // - profile_compilation_info is set to a non-null object that + // can be used to drive compilation. It will be the merge of all the data + // found in profile_files and reference_profile_files. + // - the data from profile_files[i] is merged into + // reference_profile_files[i] and the corresponding backing file is + // updated. + // + // If the returned value is false or the difference is insignificant, + // profile_compilation_info will be set to null. + // + // Additional notes: + // - as mentioned above, this function may update the content of the files + // passed with the reference_profile_files. + // - if reference_profile_files is not empty it must be the same size as + // profile_files.
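A hypothetical caller honoring the contract documented above, ahead of the declaration that follows. Only ProfileAssistant and ProfileCompilationInfo come from this header; the include path, ShouldRecompile, and its parameters are illustrative, and ownership of the out-parameter is assumed to pass to the caller since ProcessProfiles releases it from a unique_ptr:

#include <string>
#include <vector>
#include "compiler/profile_assistant.h"  // Assumed include path for this header.

bool ShouldRecompile(const std::vector<std::string>& profiles,
                     const std::vector<std::string>& reference_profiles) {
  art::ProfileCompilationInfo* info = nullptr;
  if (!art::ProfileAssistant::ProcessProfiles(profiles, reference_profiles, &info)) {
    return false;  // Reading, merging or writing failed: skip compilation.
  }
  const bool significant_difference = (info != nullptr);  // null == insignificant.
  delete info;  // The caller owns the returned object.
  return significant_difference;
}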
+ static bool ProcessProfiles( + const std::vector<std::string>& profile_files, + const std::vector<std::string>& reference_profile_files, + /*out*/ ProfileCompilationInfo** profile_compilation_info); + + private: + DISALLOW_COPY_AND_ASSIGN(ProfileAssistant); +}; + +} // namespace art + +#endif // ART_COMPILER_PROFILE_ASSISTANT_H_ diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc index afca8adcbb..0dc307c9ac 100644 --- a/compiler/utils/mips/assembler_mips.cc +++ b/compiler/utils/mips/assembler_mips.cc @@ -400,6 +400,20 @@ void MipsAssembler::Srav(Register rd, Register rt, Register rs) { EmitR(0, rs, rt, rd, 0, 0x07); } +void MipsAssembler::Ext(Register rd, Register rt, int pos, int size) { + CHECK(IsUint<5>(pos)) << pos; + CHECK(0 < size && size <= 32) << size; + CHECK(0 < pos + size && pos + size <= 32) << pos << " + " << size; + EmitR(0x1f, rt, rd, static_cast<Register>(size - 1), pos, 0x00); +} + +void MipsAssembler::Ins(Register rd, Register rt, int pos, int size) { + CHECK(IsUint<5>(pos)) << pos; + CHECK(0 < size && size <= 32) << size; + CHECK(0 < pos + size && pos + size <= 32) << pos << " + " << size; + EmitR(0x1f, rt, rd, static_cast<Register>(pos + size - 1), pos, 0x04); +} + void MipsAssembler::Lb(Register rt, Register rs, uint16_t imm16) { EmitI(0x20, rs, rt, imm16); } @@ -1121,8 +1135,14 @@ void MipsAssembler::LoadConst32(Register rd, int32_t value) { } void MipsAssembler::LoadConst64(Register reg_hi, Register reg_lo, int64_t value) { - LoadConst32(reg_lo, Low32Bits(value)); - LoadConst32(reg_hi, High32Bits(value)); + uint32_t low = Low32Bits(value); + uint32_t high = High32Bits(value); + LoadConst32(reg_lo, low); + if (high != low) { + LoadConst32(reg_hi, high); + } else { + Move(reg_hi, reg_lo); + } } void MipsAssembler::StoreConst32ToOffset(int32_t value, @@ -1136,7 +1156,11 @@ void MipsAssembler::StoreConst32ToOffset(int32_t value, base = AT; offset = 0; } - LoadConst32(temp, value); + if (value == 0) { + temp = ZERO; + } else { + LoadConst32(temp, value); + } Sw(temp, base, offset); } @@ -1152,22 +1176,48 @@ void MipsAssembler::StoreConst64ToOffset(int64_t value, base = AT; offset = 0; } - LoadConst32(temp, Low32Bits(value)); - Sw(temp, base, offset); - LoadConst32(temp, High32Bits(value)); - Sw(temp, base, offset + kMipsWordSize); + uint32_t low = Low32Bits(value); + uint32_t high = High32Bits(value); + if (low == 0) { + Sw(ZERO, base, offset); + } else { + LoadConst32(temp, low); + Sw(temp, base, offset); + } + if (high == 0) { + Sw(ZERO, base, offset + kMipsWordSize); + } else { + if (high != low) { + LoadConst32(temp, high); + } + Sw(temp, base, offset + kMipsWordSize); + } } void MipsAssembler::LoadSConst32(FRegister r, int32_t value, Register temp) { - LoadConst32(temp, value); + if (value == 0) { + temp = ZERO; + } else { + LoadConst32(temp, value); + } Mtc1(temp, r); } void MipsAssembler::LoadDConst64(FRegister rd, int64_t value, Register temp) { - LoadConst32(temp, Low32Bits(value)); - Mtc1(temp, rd); - LoadConst32(temp, High32Bits(value)); - Mthc1(temp, rd); + uint32_t low = Low32Bits(value); + uint32_t high = High32Bits(value); + if (low == 0) { + Mtc1(ZERO, rd); + } else { + LoadConst32(temp, low); + Mtc1(temp, rd); + } + if (high == 0) { + Mthc1(ZERO, rd); + } else { + LoadConst32(temp, high); + Mthc1(temp, rd); + } } void MipsAssembler::Addiu32(Register rt, Register rs, int32_t value, Register temp) { diff --git a/compiler/utils/mips/assembler_mips.h b/compiler/utils/mips/assembler_mips.h index f569aa858c..066e7b0014 
100644 --- a/compiler/utils/mips/assembler_mips.h +++ b/compiler/utils/mips/assembler_mips.h @@ -156,6 +156,8 @@ class MipsAssembler FINAL : public Assembler { void Srlv(Register rd, Register rt, Register rs); void Rotrv(Register rd, Register rt, Register rs); // R2+ void Srav(Register rd, Register rt, Register rs); + void Ext(Register rd, Register rt, int pos, int size); // R2+ + void Ins(Register rd, Register rt, int pos, int size); // R2+ void Lb(Register rt, Register rs, uint16_t imm16); void Lh(Register rt, Register rs, uint16_t imm16); diff --git a/compiler/utils/mips/assembler_mips_test.cc b/compiler/utils/mips/assembler_mips_test.cc index 6f8b3e8c57..4361843c54 100644 --- a/compiler/utils/mips/assembler_mips_test.cc +++ b/compiler/utils/mips/assembler_mips_test.cc @@ -367,6 +367,44 @@ TEST_F(AssemblerMIPSTest, Srav) { DriverStr(RepeatRRR(&mips::MipsAssembler::Srav, "srav ${reg1}, ${reg2}, ${reg3}"), "Srav"); } +TEST_F(AssemblerMIPSTest, Ins) { + std::vector<mips::Register*> regs = GetRegisters(); + WarnOnCombinations(regs.size() * regs.size() * 33 * 16); + std::string expected; + for (mips::Register* reg1 : regs) { + for (mips::Register* reg2 : regs) { + for (int32_t pos = 0; pos < 32; pos++) { + for (int32_t size = 1; pos + size <= 32; size++) { + __ Ins(*reg1, *reg2, pos, size); + std::ostringstream instr; + instr << "ins $" << *reg1 << ", $" << *reg2 << ", " << pos << ", " << size << "\n"; + expected += instr.str(); + } + } + } + } + DriverStr(expected, "Ins"); +} + +TEST_F(AssemblerMIPSTest, Ext) { + std::vector<mips::Register*> regs = GetRegisters(); + WarnOnCombinations(regs.size() * regs.size() * 33 * 16); + std::string expected; + for (mips::Register* reg1 : regs) { + for (mips::Register* reg2 : regs) { + for (int32_t pos = 0; pos < 32; pos++) { + for (int32_t size = 1; pos + size <= 32; size++) { + __ Ext(*reg1, *reg2, pos, size); + std::ostringstream instr; + instr << "ext $" << *reg1 << ", $" << *reg2 << ", " << pos << ", " << size << "\n"; + expected += instr.str(); + } + } + } + } + DriverStr(expected, "Ext"); +} + TEST_F(AssemblerMIPSTest, Lb) { DriverStr(RepeatRRIb(&mips::MipsAssembler::Lb, -16, "lb ${reg1}, {imm}(${reg2})"), "Lb"); } diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 50480d9043..32a237a126 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -63,6 +63,7 @@ #include "gc/space/space-inl.h" #include "image_writer.h" #include "interpreter/unstarted_runtime.h" +#include "jit/offline_profiling_info.h" #include "leb128.h" #include "mirror/class-inl.h" #include "mirror/class_loader.h" @@ -70,6 +71,7 @@ #include "mirror/object_array-inl.h" #include "oat_writer.h" #include "os.h" +#include "profile_assistant.h" #include "runtime.h" #include "runtime_options.h" #include "ScopedLocalRef.h" @@ -328,6 +330,16 @@ NO_RETURN static void Usage(const char* fmt, ...) { UsageError(" Example: --runtime-arg -Xms256m"); UsageError(""); UsageError(" --profile-file=<filename>: specify profiler output file to use for compilation."); + UsageError(" Can be specified multiple times, in which case the data from the different"); + UsageError(" profiles will be aggregated."); + UsageError(""); + UsageError(" --reference-profile-file=<filename>: specify a reference profile file to use when"); + UsageError(" compiling.
The data in this file will be compared with the data in the"); + UsageError(" associated --profile-file and the compilation will proceed only if there is"); + UsageError(" a significant difference (--reference-profile-file is paired with"); + UsageError(" --profile-file in the natural order). If the compilation was attempted then"); + UsageError(" --profile-file will be merged into --reference-profile-file. Valid only when"); + UsageError(" specified together with --profile-file."); UsageError(""); UsageError(" --print-pass-names: print a list of pass names"); UsageError(""); @@ -767,6 +779,13 @@ class Dex2Oat FINAL { } } + if (!profile_files_.empty()) { + if (!reference_profile_files_.empty() && + (reference_profile_files_.size() != profile_files_.size())) { + Usage("If specified, --reference-profile-file should match the number of --profile-file."); + } + } + if (!parser_options->oat_symbols.empty()) { oat_unstripped_ = std::move(parser_options->oat_symbols); } @@ -1057,8 +1076,10 @@ class Dex2Oat FINAL { } else if (option.starts_with("--compiler-backend=")) { ParseCompilerBackend(option, parser_options.get()); } else if (option.starts_with("--profile-file=")) { - profile_file_ = option.substr(strlen("--profile-file=")).data(); - VLOG(compiler) << "dex2oat: profile file is " << profile_file_; + profile_files_.push_back(option.substr(strlen("--profile-file=")).ToString()); + } else if (option.starts_with("--reference-profile-file=")) { + reference_profile_files_.push_back( + option.substr(strlen("--reference-profile-file=")).ToString()); } else if (option == "--no-profile-file") { // No profile } else if (option == "--host") { @@ -1479,9 +1500,8 @@ class Dex2Oat FINAL { dump_cfg_append_, compiler_phases_timings_.get(), swap_fd_, - profile_file_, - &dex_file_oat_filename_map_)); - + &dex_file_oat_filename_map_, + profile_compilation_info_.get())); driver_->SetDexFilesForOatFile(dex_files_); driver_->CompileAll(class_loader, dex_files_, timings_); } @@ -1569,7 +1589,6 @@ class Dex2Oat FINAL { std::vector<gc::space::ImageSpace*> image_spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces(); for (gc::space::ImageSpace* image_space : image_spaces) { - // TODO: IS THIS IN ORDER? JUST TAKE THE LAST ONE? image_base_ = std::max(image_base_, RoundUp( reinterpret_cast<uintptr_t>(image_space->GetImageHeader().GetOatFileEnd()), kPageSize)); @@ -1790,6 +1809,26 @@ class Dex2Oat FINAL { return is_host_; } + bool UseProfileGuidedCompilation() const { + return !profile_files_.empty(); + } + + bool ProcessProfiles() { + DCHECK(UseProfileGuidedCompilation()); + ProfileCompilationInfo* info = nullptr; + if (ProfileAssistant::ProcessProfiles(profile_files_, reference_profile_files_, &info)) { + profile_compilation_info_.reset(info); + return true; + } + return false; + } + + bool ShouldCompileBasedOnProfiles() const { + DCHECK(UseProfileGuidedCompilation()); + // If we are given profiles, compile only if we have new information. 
+ return profile_compilation_info_ != nullptr; + } + private: template <typename T> static std::vector<T*> MakeNonOwningPointerVector(const std::vector<std::unique_ptr<T>>& src) { @@ -2263,7 +2302,9 @@ class Dex2Oat FINAL { int swap_fd_; std::string app_image_file_name_; int app_image_fd_; - std::string profile_file_; // Profile file to use + std::vector<std::string> profile_files_; + std::vector<std::string> reference_profile_files_; + std::unique_ptr<ProfileCompilationInfo> profile_compilation_info_; TimingLogger* timings_; std::unique_ptr<CumulativeLogger> compiler_phases_timings_; std::vector<std::vector<const DexFile*>> dex_files_per_oat_file_; @@ -2380,6 +2421,20 @@ static int dex2oat(int argc, char** argv) { // Parse arguments. Argument mistakes will lead to exit(EXIT_FAILURE) in UsageError. dex2oat.ParseArgs(argc, argv); + // Process profile information and assess if we need to do a profile guided compilation. + // This operation involves I/O. + if (dex2oat.UseProfileGuidedCompilation()) { + if (dex2oat.ProcessProfiles()) { + if (!dex2oat.ShouldCompileBasedOnProfiles()) { + LOG(INFO) << "Skipped compilation because of insignificant profile delta"; + return EXIT_SUCCESS; + } + } else { + LOG(WARNING) << "Failed to process profile files"; + return EXIT_FAILURE; + } + } + // Check early that the result of compilation can be written if (!dex2oat.OpenFile()) { return EXIT_FAILURE; diff --git a/disassembler/disassembler_mips.cc b/disassembler/disassembler_mips.cc index cd64a4f926..ee7b21ced7 100644 --- a/disassembler/disassembler_mips.cc +++ b/disassembler/disassembler_mips.cc @@ -150,7 +150,9 @@ static const MipsInstruction gMipsInstructions[] = { { kSpecial2Mask | 0x3f, (28 << kOpcodeShift) | 0x3f, "sdbbp", "" }, // TODO: code // SPECIAL3 + { kSpecial3Mask | 0x3f, (31 << kOpcodeShift), "ext", "TSAZ", }, { kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 3, "dext", "TSAZ", }, + { kSpecial3Mask | 0x3f, (31 << kOpcodeShift) | 4, "ins", "TSAz", }, { kSpecial3Mask | (0x1f << 21) | (0x1f << 6) | 0x3f, (31 << kOpcodeShift) | (16 << 6) | 32, "seb", @@ -421,7 +423,7 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) { opcode = gMipsInstructions[i].name; for (const char* args_fmt = gMipsInstructions[i].args_fmt; *args_fmt; ++args_fmt) { switch (*args_fmt) { - case 'A': // sa (shift amount or [d]ext position). + case 'A': // sa (shift amount or [d]ins/[d]ext position). args << sa; break; case 'B': // Branch offset. @@ -519,7 +521,8 @@ size_t DisassemblerMips::Dump(std::ostream& os, const uint8_t* instr_ptr) { case 's': args << 'f' << rs; break; case 'T': args << 'r' << rt; break; case 't': args << 'f' << rt; break; - case 'Z': args << rd; break; // sz ([d]ext size). + case 'Z': args << (rd + 1); break; // sz ([d]ext size). + case 'z': args << (rd - sa + 1); break; // sz ([d]ins size). 
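The two added 'ins'/'ext' disassembler cases recover the size operand from the encoded fields: MIPS ext encodes msbd = size - 1 in the rd field, while ins encodes msb = pos + size - 1 there, with the lsb (the pos operand) carried in the sa field in both cases. Hence size prints as rd + 1 for ext but rd - sa + 1 for ins. A small self-contained check of that arithmetic (an illustrative sketch, not part of the patch):

#include <cassert>

int main() {
  const int pos = 4;
  const int size = 12;
  // "ins $t0, $t1, 4, 12": the sa field holds lsb = pos and the rd field
  // holds msb = pos + size - 1, so the disassembler prints rd - sa + 1.
  const int sa_field = pos;
  const int rd_field_ins = pos + size - 1;
  assert(rd_field_ins - sa_field + 1 == size);
  // "ext $t0, $t1, 4, 12": the rd field holds msbd = size - 1, so the
  // disassembler prints rd + 1.
  const int rd_field_ext = size - 1;
  assert(rd_field_ext + 1 == size);
  return 0;
}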
} if (*(args_fmt + 1)) { args << ", "; diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc index b403abd2b6..d836532ed2 100644 --- a/patchoat/patchoat.cc +++ b/patchoat/patchoat.cc @@ -150,100 +150,6 @@ static bool FinishFile(File* file, bool close) { } } -bool PatchOat::Patch(const std::string& image_location, off_t delta, - File* output_image, InstructionSet isa, - TimingLogger* timings) { - CHECK(Runtime::Current() == nullptr); - CHECK(output_image != nullptr); - CHECK_GE(output_image->Fd(), 0); - CHECK(!image_location.empty()) << "image file must have a filename."; - CHECK_NE(isa, kNone); - - TimingLogger::ScopedTiming t("Runtime Setup", timings); - const char *isa_name = GetInstructionSetString(isa); - std::string image_filename; - if (!LocationToFilename(image_location, isa, &image_filename)) { - LOG(ERROR) << "Unable to find image at location " << image_location; - return false; - } - std::unique_ptr<File> input_image(OS::OpenFileForReading(image_filename.c_str())); - if (input_image.get() == nullptr) { - LOG(ERROR) << "unable to open input image file at " << image_filename - << " for location " << image_location; - return false; - } - - int64_t image_len = input_image->GetLength(); - if (image_len < 0) { - LOG(ERROR) << "Error while getting image length"; - return false; - } - ImageHeader image_header; - if (sizeof(image_header) != input_image->Read(reinterpret_cast<char*>(&image_header), - sizeof(image_header), 0)) { - LOG(ERROR) << "Unable to read image header from image file " << input_image->GetPath(); - return false; - } - - if (image_header.GetStorageMode() != ImageHeader::kStorageModeUncompressed) { - LOG(ERROR) << "Patchoat is not supported with compressed image files " - << input_image->GetPath(); - return false; - } - - /*bool is_image_pic = */IsImagePic(image_header, input_image->GetPath()); - // Nothing special to do right now since the image always needs to get patched. - // Perhaps in some far-off future we may have images with relative addresses that are true-PIC. - - // Set up the runtime - RuntimeOptions options; - NoopCompilerCallbacks callbacks; - options.push_back(std::make_pair("compilercallbacks", &callbacks)); - std::string img = "-Ximage:" + image_location; - options.push_back(std::make_pair(img.c_str(), nullptr)); - options.push_back(std::make_pair("imageinstructionset", reinterpret_cast<const void*>(isa_name))); - options.push_back(std::make_pair("-Xno-sig-chain", nullptr)); - if (!Runtime::Create(options, false)) { - LOG(ERROR) << "Unable to initialize runtime"; - return false; - } - // Runtime::Create acquired the mutator_lock_ that is normally given away when we Runtime::Start, - // give it away now and then switch to a more manageable ScopedObjectAccess. - Thread::Current()->TransitionFromRunnableToSuspended(kNative); - ScopedObjectAccess soa(Thread::Current()); - - t.NewTiming("Image and oat Patching setup"); - // Create the map where we will write the image patches to. - std::string error_msg; - std::unique_ptr<MemMap> image(MemMap::MapFile(image_len, - PROT_READ | PROT_WRITE, - MAP_PRIVATE, - input_image->Fd(), - 0, - /*low_4gb*/false, - input_image->GetPath().c_str(), - &error_msg)); - if (image.get() == nullptr) { - LOG(ERROR) << "unable to map image file " << input_image->GetPath() << " : " << error_msg; - return false; - } - // TODO: Support multi-image when patchoat is only patching images. Ever used? 
b/26317072 - gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetBootImageSpaces()[0]; - - PatchOat p(isa, image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(), delta, timings); - t.NewTiming("Patching files"); - if (!p.PatchImage(true)) { - LOG(ERROR) << "Failed to patch image file " << input_image->GetPath(); - return false; - } - - t.NewTiming("Writing files"); - if (!p.WriteImage(output_image)) { - return false; - } - return true; -} - bool PatchOat::Patch(File* input_oat, const std::string& image_location, off_t delta, File* output_oat, File* output_image, InstructionSet isa, TimingLogger* timings, @@ -634,7 +540,7 @@ void PatchOat::PatchInternedStrings(const ImageHeader* image_header) { // Note that we require that ReadFromMemory does not make an internal copy of the elements. // This also relies on visit roots not doing any verification which could fail after we update // the roots to be the image addresses. - temp_table.ReadFromMemory(image_->Begin() + section.Offset()); + temp_table.AddTableFromMemory(image_->Begin() + section.Offset()); FixupRootVisitor visitor(this); temp_table.VisitRoots(&visitor, kVisitRootFlagAllRoots); } @@ -765,8 +671,6 @@ bool PatchOat::InHeap(mirror::Object* o) { void PatchOat::PatchVisitor::operator() (mirror::Object* obj, MemberOffset off, bool is_static_unused ATTRIBUTE_UNUSED) const { mirror::Object* referent = obj->GetFieldObject<mirror::Object, kVerifyNone>(off); - // TODO: Modify check for multi-image support? b/26317072 - // DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap."; mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent); copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object); } @@ -775,8 +679,7 @@ void PatchOat::PatchVisitor::operator() (mirror::Class* cls ATTRIBUTE_UNUSED, mirror::Reference* ref) const { MemberOffset off = mirror::Reference::ReferentOffset(); mirror::Object* referent = ref->GetReferent(); - // TODO: Modify check for multi-image support? b/26317072 - // DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap."; + DCHECK(patcher_->InHeap(referent)) << "Referent is not in the heap."; mirror::Object* moved_object = patcher_->RelocatedAddressOfPointer(referent); copy_->SetFieldObjectWithoutWriteBarrier<false, true, kVerifyNone>(off, moved_object); } @@ -1271,8 +1174,12 @@ static int patchoat(int argc, char **argv) { bool have_image_files = have_output_image; bool have_oat_files = have_output_oat; - if (!have_oat_files && !have_image_files) { - Usage("Must be patching either an oat or an image file or both."); + if (!have_oat_files) { + if (have_image_files) { + Usage("Cannot patch an image file without an oat file"); + } else { + Usage("Must be patching either an oat file or an image file with an oat file."); + } } if (!have_oat_files && !isa_set) { @@ -1507,10 +1414,6 @@ static int patchoat(int argc, char **argv) { output_oat_fd >= 0, // was it opened from FD? 
new_oat_out); ret = FinishFile(output_oat.get(), ret); - } else if (have_image_files) { - TimingLogger::ScopedTiming pt("patch image", &timings); - ret = PatchOat::Patch(input_image_location, base_delta, output_image.get(), isa, &timings); - ret = FinishFile(output_image.get(), ret); } else { CHECK(false); ret = true; diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h index cb0d14b2c2..ceddc343be 100644 --- a/patchoat/patchoat.h +++ b/patchoat/patchoat.h @@ -133,12 +133,11 @@ class PatchOat { if (obj == nullptr) { return nullptr; } - // TODO: Fix these checks for multi-image. Some may still be valid. b/26317072 - // DCHECK_GT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->Begin())); - // DCHECK_LT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->End())); + DCHECK_GT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->Begin())); + DCHECK_LT(reinterpret_cast<uintptr_t>(obj), reinterpret_cast<uintptr_t>(heap_->End())); uintptr_t heap_off = reinterpret_cast<uintptr_t>(obj) - reinterpret_cast<uintptr_t>(heap_->Begin()); - // DCHECK_LT(heap_off, image_->Size()); + DCHECK_LT(heap_off, image_->Size()); return reinterpret_cast<T*>(image_->Begin() + heap_off); } diff --git a/runtime/Android.mk b/runtime/Android.mk index de4314c321..14e5ec9cfe 100644 --- a/runtime/Android.mk +++ b/runtime/Android.mk @@ -371,12 +371,6 @@ LIBART_CFLAGS := -DBUILDING_LIBART=1 LIBART_TARGET_CFLAGS := LIBART_HOST_CFLAGS := -ifeq ($(MALLOC_IMPL),dlmalloc) - LIBART_TARGET_CFLAGS += -DUSE_DLMALLOC -else - LIBART_TARGET_CFLAGS += -DUSE_JEMALLOC -endif - # Default dex2oat instruction set features. LIBART_HOST_DEFAULT_INSTRUCTION_SET_FEATURES := default LIBART_TARGET_DEFAULT_INSTRUCTION_SET_FEATURES := default diff --git a/runtime/art_method.cc b/runtime/art_method.cc index effa1c5d3f..6f36016d25 100644 --- a/runtime/art_method.cc +++ b/runtime/art_method.cc @@ -24,7 +24,6 @@ #include "debugger.h" #include "dex_file-inl.h" #include "dex_instruction.h" -#include "entrypoints/entrypoint_utils.h" #include "entrypoints/runtime_asm_entrypoints.h" #include "gc/accounting/card_table-inl.h" #include "interpreter/interpreter.h" diff --git a/runtime/base/mutex.cc b/runtime/base/mutex.cc index 70bd398415..82a5f9611c 100644 --- a/runtime/base/mutex.cc +++ b/runtime/base/mutex.cc @@ -855,6 +855,18 @@ void ConditionVariable::WaitHoldingLocks(Thread* self) { PLOG(FATAL) << "futex wait failed for " << name_; } } + if (self != nullptr) { + JNIEnvExt* const env = self->GetJniEnv(); + if (UNLIKELY(env != nullptr && env->runtime_deleted)) { + CHECK(self->IsDaemon()); + // If the runtime has been deleted, then we cannot proceed. Just sleep forever. This may + // occur for user daemon threads that get a spurious wakeup. This occurs for test 132 with + // --host and --gdb. + // After we wake up, the runtime may have been shutdown, which means that this condition may + // have been deleted. It is not safe to retry the wait. 
+ SleepForever(); + } + } guard_.ExclusiveLock(self); CHECK_GE(num_waiters_, 0); num_waiters_--; diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index 0518911a86..41842e88f3 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -736,7 +736,7 @@ void ClassLinker::RunRootClinits() { static void SanityCheckArtMethod(ArtMethod* m, mirror::Class* expected_class, - std::vector<gc::space::ImageSpace*> spaces) + const std::vector<gc::space::ImageSpace*>& spaces) SHARED_REQUIRES(Locks::mutator_lock_) { if (m->IsRuntimeMethod()) { CHECK(m->GetDeclaringClass() == nullptr) << PrettyMethod(m); @@ -760,7 +760,7 @@ static void SanityCheckArtMethod(ArtMethod* m, static void SanityCheckArtMethodPointerArray(mirror::PointerArray* arr, mirror::Class* expected_class, size_t pointer_size, - std::vector<gc::space::ImageSpace*> spaces) + const std::vector<gc::space::ImageSpace*>& spaces) SHARED_REQUIRES(Locks::mutator_lock_) { CHECK(arr != nullptr); for (int32_t j = 0; j < arr->GetLength(); ++j) { @@ -775,27 +775,32 @@ static void SanityCheckArtMethodPointerArray(mirror::PointerArray* arr, } } -/* TODO: Modify check to support multiple image spaces and reenable. b/26317072 -static void SanityCheckArtMethodPointerArray( - ArtMethod** arr, - size_t size, - size_t pointer_size, - gc::space::ImageSpace* space) SHARED_REQUIRES(Locks::mutator_lock_) { +static void SanityCheckArtMethodPointerArray(ArtMethod** arr, + size_t size, + size_t pointer_size, + const std::vector<gc::space::ImageSpace*>& spaces) + SHARED_REQUIRES(Locks::mutator_lock_) { CHECK_EQ(arr != nullptr, size != 0u); if (arr != nullptr) { - auto offset = reinterpret_cast<uint8_t*>(arr) - space->Begin(); - CHECK(space->GetImageHeader().GetImageSection( - ImageHeader::kSectionDexCacheArrays).Contains(offset)); + bool contains = false; + for (auto space : spaces) { + auto offset = reinterpret_cast<uint8_t*>(arr) - space->Begin(); + if (space->GetImageHeader().GetImageSection( + ImageHeader::kSectionDexCacheArrays).Contains(offset)) { + contains = true; + break; + } + } + CHECK(contains); } for (size_t j = 0; j < size; ++j) { ArtMethod* method = mirror::DexCache::GetElementPtrSize(arr, j, pointer_size); // expected_class == null means we are a dex cache. if (method != nullptr) { - SanityCheckArtMethod(method, nullptr, space); + SanityCheckArtMethod(method, nullptr, spaces); } } } -*/ static void SanityCheckObjectsCallback(mirror::Object* obj, void* arg ATTRIBUTE_UNUSED) SHARED_REQUIRES(Locks::mutator_lock_) { @@ -1018,13 +1023,12 @@ bool ClassLinker::InitFromImage(std::string* error_msg) { return false; } - // TODO: Modify check to support multiple image spaces and reenable. 
-// if (kSanityCheckObjects) { -// SanityCheckArtMethodPointerArray(dex_cache->GetResolvedMethods(), -// dex_cache->NumResolvedMethods(), -// image_pointer_size_, -// spaces); -// } + if (kSanityCheckObjects) { + SanityCheckArtMethodPointerArray(dex_cache->GetResolvedMethods(), + dex_cache->NumResolvedMethods(), + image_pointer_size_, + spaces); + } if (dex_file->GetLocationChecksum() != oat_dex_file->GetDexFileLocationChecksum()) { *error_msg = StringPrintf("Checksums do not match for %s: %x vs %x", @@ -1109,6 +1113,7 @@ bool ClassLinker::InitFromImage(std::string* error_msg) { mirror::Throwable::SetClass(GetClassRoot(kJavaLangThrowable)); mirror::StackTraceElement::SetClass(GetClassRoot(kJavaLangStackTraceElement)); + size_t class_tables_added = 0; for (gc::space::ImageSpace* space : spaces) { const ImageHeader& header = space->GetImageHeader(); const ImageSection& section = header.GetImageSection(ImageHeader::kSectionClassTable); @@ -1116,9 +1121,17 @@ bool ClassLinker::InitFromImage(std::string* error_msg) { WriterMutexLock mu(self, *Locks::classlinker_classes_lock_); ClassTable* const class_table = InsertClassTableForClassLoader(nullptr); class_table->ReadFromMemory(space->Begin() + section.Offset()); - dex_cache_boot_image_class_lookup_required_ = false; + ++class_tables_added; } } + if (class_tables_added != 0) { + // Either all of the image spaces have an empty class section or none do. In the case where + // an image space has no classes, it will still have a non-empty class section that contains + // metadata. + CHECK_EQ(spaces.size(), class_tables_added) + << "Expected non-empty class section for each image space."; + dex_cache_boot_image_class_lookup_required_ = false; + } FinishInit(self); diff --git a/runtime/class_table.h b/runtime/class_table.h index c911365698..911f3c22db 100644 --- a/runtime/class_table.h +++ b/runtime/class_table.h @@ -106,8 +106,7 @@ class ClassTable { // Combines all of the tables into one class set. size_t WriteToMemory(uint8_t* ptr) const - REQUIRES(Locks::classlinker_classes_lock_) - SHARED_REQUIRES(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::classlinker_classes_lock_, Locks::mutator_lock_); size_t ReadFromMemory(uint8_t* ptr) REQUIRES(Locks::classlinker_classes_lock_) SHARED_REQUIRES(Locks::mutator_lock_); diff --git a/runtime/common_runtime_test.cc b/runtime/common_runtime_test.cc index 403dd4c0df..624abb9f6d 100644 --- a/runtime/common_runtime_test.cc +++ b/runtime/common_runtime_test.cc @@ -436,17 +436,26 @@ void CommonRuntimeTest::TearDown() { } } -std::vector<std::string> CommonRuntimeTest::GetLibCoreDexFileNames() { - return std::vector<std::string>({GetDexFileName("core-oj"), GetDexFileName("core-libart")}); -} - -std::string CommonRuntimeTest::GetDexFileName(const std::string& jar_prefix) { - if (IsHost()) { +static std::string GetDexFileName(const std::string& jar_prefix, bool host) { + std::string path; + if (host) { const char* host_dir = getenv("ANDROID_HOST_OUT"); CHECK(host_dir != nullptr); - return StringPrintf("%s/framework/%s-hostdex.jar", host_dir, jar_prefix.c_str()); + path = host_dir; + } else { + path = GetAndroidRoot(); } - return StringPrintf("%s/framework/%s.jar", GetAndroidRoot(), jar_prefix.c_str()); + + std::string suffix = host + ? "-hostdex" // The host version. + : "-testdex"; // The unstripped target version. 
+ + return StringPrintf("%s/framework/%s%s.jar", path.c_str(), jar_prefix.c_str(), suffix.c_str()); +} + +std::vector<std::string> CommonRuntimeTest::GetLibCoreDexFileNames() { + return std::vector<std::string>({GetDexFileName("core-oj", IsHost()), + GetDexFileName("core-libart", IsHost())}); } std::string CommonRuntimeTest::GetTestAndroidRoot() { diff --git a/runtime/common_runtime_test.h b/runtime/common_runtime_test.h index 8d9e6281c0..7223b6ec6b 100644 --- a/runtime/common_runtime_test.h +++ b/runtime/common_runtime_test.h @@ -118,9 +118,6 @@ class CommonRuntimeTest : public testing::Test { // initializers, initialize well-known classes, and creates the heap thread pool. virtual void FinalizeSetup(); - // Gets the path of the specified dex file for host or target. - static std::string GetDexFileName(const std::string& jar_prefix); - std::string GetTestAndroidRoot(); std::string GetTestDexFileName(const char* name); diff --git a/runtime/common_throws.cc b/runtime/common_throws.cc index d68b463950..40e2b1593e 100644 --- a/runtime/common_throws.cc +++ b/runtime/common_throws.cc @@ -18,6 +18,8 @@ #include <sstream> +#include "ScopedLocalRef.h" + #include "art_field-inl.h" #include "art_method-inl.h" #include "base/logging.h" @@ -522,6 +524,104 @@ void ThrowRuntimeException(const char* fmt, ...) { va_end(args); } +// Stack overflow. + +void ThrowStackOverflowError(Thread* self) { + if (self->IsHandlingStackOverflow()) { + LOG(ERROR) << "Recursive stack overflow."; + // We don't fail here because SetStackEndForStackOverflow will print better diagnostics. + } + + self->SetStackEndForStackOverflow(); // Allow space on the stack for constructor to execute. + JNIEnvExt* env = self->GetJniEnv(); + std::string msg("stack size "); + msg += PrettySize(self->GetStackSize()); + + // Avoid running Java code for exception initialization. + // TODO: Checks to make this a bit less brittle. + + std::string error_msg; + + // Allocate an uninitialized object. + ScopedLocalRef<jobject> exc(env, + env->AllocObject(WellKnownClasses::java_lang_StackOverflowError)); + if (exc.get() != nullptr) { + // "Initialize". + // StackOverflowError -> VirtualMachineError -> Error -> Throwable -> Object. + // Only Throwable has "custom" fields: + // String detailMessage. + // Throwable cause (= this). + // List<Throwable> suppressedExceptions (= Collections.emptyList()). + // Object stackState; + // StackTraceElement[] stackTrace; + // Only Throwable has a non-empty constructor: + // this.stackTrace = EmptyArray.STACK_TRACE_ELEMENT; + // fillInStackTrace(); + + // detailMessage. + // TODO: Use String::FromModifiedUTF...? + ScopedLocalRef<jstring> s(env, env->NewStringUTF(msg.c_str())); + if (s.get() != nullptr) { + env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_detailMessage, s.get()); + + // cause. + env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_cause, exc.get()); + + // suppressedExceptions. + ScopedLocalRef<jobject> emptylist(env, env->GetStaticObjectField( + WellKnownClasses::java_util_Collections, + WellKnownClasses::java_util_Collections_EMPTY_LIST)); + CHECK(emptylist.get() != nullptr); + env->SetObjectField(exc.get(), + WellKnownClasses::java_lang_Throwable_suppressedExceptions, + emptylist.get()); + + // stackState is set as result of fillInStackTrace. fillInStackTrace calls + // nativeFillInStackTrace. 
+ ScopedLocalRef<jobject> stack_state_val(env, nullptr); + { + ScopedObjectAccessUnchecked soa(env); + stack_state_val.reset(soa.Self()->CreateInternalStackTrace<false>(soa)); + } + if (stack_state_val.get() != nullptr) { + env->SetObjectField(exc.get(), + WellKnownClasses::java_lang_Throwable_stackState, + stack_state_val.get()); + + // stackTrace. + ScopedLocalRef<jobject> stack_trace_elem(env, env->GetStaticObjectField( + WellKnownClasses::libcore_util_EmptyArray, + WellKnownClasses::libcore_util_EmptyArray_STACK_TRACE_ELEMENT)); + env->SetObjectField(exc.get(), + WellKnownClasses::java_lang_Throwable_stackTrace, + stack_trace_elem.get()); + } else { + error_msg = "Could not create stack trace."; + } + // Throw the exception. + self->SetException(reinterpret_cast<mirror::Throwable*>(self->DecodeJObject(exc.get()))); + } else { + // Could not allocate a string object. + error_msg = "Couldn't throw new StackOverflowError because JNI NewStringUTF failed."; + } + } else { + error_msg = "Could not allocate StackOverflowError object."; + } + + if (!error_msg.empty()) { + LOG(WARNING) << error_msg; + CHECK(self->IsExceptionPending()); + } + + bool explicit_overflow_check = Runtime::Current()->ExplicitStackOverflowChecks(); + self->ResetDefaultStackEnd(); // Return to default stack size. + + // And restore protection if implicit checks are on. + if (!explicit_overflow_check) { + self->ProtectStack(); + } +} + // VerifyError void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...) { diff --git a/runtime/common_throws.h b/runtime/common_throws.h index 2a0934fb5b..85fe2b3997 100644 --- a/runtime/common_throws.h +++ b/runtime/common_throws.h @@ -154,10 +154,10 @@ void ThrowNegativeArraySizeException(const char* msg) void ThrowNoSuchFieldError(const StringPiece& scope, mirror::Class* c, const StringPiece& type, const StringPiece& name) - SHARED_REQUIRES(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; void ThrowNoSuchFieldException(mirror::Class* c, const StringPiece& name) - SHARED_REQUIRES(Locks::mutator_lock_); + SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; // NoSuchMethodError @@ -194,6 +194,10 @@ void ThrowRuntimeException(const char* fmt, ...) __attribute__((__format__(__printf__, 1, 2))) SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; +// Stack overflow. + +void ThrowStackOverflowError(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) COLD_ATTR; + // VerifyError void ThrowVerifyError(mirror::Class* referrer, const char* fmt, ...) 
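The ThrowStackOverflowError that now lives in common_throws.cc builds the error object without executing any Java code: AllocObject allocates the instance without invoking <init>, and Throwable's fields are then written directly through JNI, since running a constructor on a thread whose stack is already exhausted could overflow again. Below is a minimal standalone sketch of that allocate-then-populate pattern against a host JVM via the JNI invocation API (an illustration of the technique, not ART code; it assumes the JDK's java.lang.Throwable with its private detailMessage field, which JNI can write regardless of access modifiers; error handling is elided):

#include <jni.h>

int main() {
  JavaVM* vm;
  JNIEnv* env;
  JavaVMInitArgs vm_args;
  vm_args.version = JNI_VERSION_1_6;
  vm_args.nOptions = 0;
  vm_args.options = nullptr;
  vm_args.ignoreUnrecognized = JNI_TRUE;
  if (JNI_CreateJavaVM(&vm, reinterpret_cast<void**>(&env), &vm_args) != JNI_OK) {
    return 1;
  }
  // Allocate the instance without running any constructor.
  jclass soe_class = env->FindClass("java/lang/StackOverflowError");
  jobject exc = env->AllocObject(soe_class);
  // Fill in the inherited Throwable.detailMessage field directly, mirroring
  // the SetObjectField calls in the patch.
  jclass throwable_class = env->FindClass("java/lang/Throwable");
  jfieldID detail_message =
      env->GetFieldID(throwable_class, "detailMessage", "Ljava/lang/String;");
  env->SetObjectField(exc, detail_message, env->NewStringUTF("stack size 8MB"));
  // Make it the pending exception, then print and clear it.
  env->Throw(static_cast<jthrowable>(exc));
  env->ExceptionDescribe();
  vm->DestroyJavaVM();
  return 0;
}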
diff --git a/runtime/debugger.cc b/runtime/debugger.cc index f009fe6acc..6e11cf88c4 100644 --- a/runtime/debugger.cc +++ b/runtime/debugger.cc @@ -726,12 +726,11 @@ JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* supercla JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) { JDWP::JdwpError error; - mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error); - if (o == nullptr) { - return JDWP::ERR_INVALID_OBJECT; + mirror::Class* c = DecodeClass(id, &error); + if (c == nullptr) { + return error; } - DCHECK(o->IsClass()); - expandBufAddObjectId(pReply, gRegistry->Add(o->AsClass()->GetClassLoader())); + expandBufAddObjectId(pReply, gRegistry->Add(c->GetClassLoader())); return JDWP::ERR_NONE; } diff --git a/runtime/entrypoints/entrypoint_utils.cc b/runtime/entrypoints/entrypoint_utils.cc index 915d9ab5e7..b5a55bfa44 100644 --- a/runtime/entrypoints/entrypoint_utils.cc +++ b/runtime/entrypoints/entrypoint_utils.cc @@ -33,7 +33,6 @@ #include "oat_quick_method_header.h" #include "reflection.h" #include "scoped_thread_state_change.h" -#include "ScopedLocalRef.h" #include "well_known_classes.h" namespace art { @@ -120,102 +119,6 @@ mirror::Array* CheckAndAllocArrayFromCodeInstrumented(uint32_t type_idx, heap->GetCurrentAllocator()); } -void ThrowStackOverflowError(Thread* self) { - if (self->IsHandlingStackOverflow()) { - LOG(ERROR) << "Recursive stack overflow."; - // We don't fail here because SetStackEndForStackOverflow will print better diagnostics. - } - - self->SetStackEndForStackOverflow(); // Allow space on the stack for constructor to execute. - JNIEnvExt* env = self->GetJniEnv(); - std::string msg("stack size "); - msg += PrettySize(self->GetStackSize()); - - // Avoid running Java code for exception initialization. - // TODO: Checks to make this a bit less brittle. - - std::string error_msg; - - // Allocate an uninitialized object. - ScopedLocalRef<jobject> exc(env, - env->AllocObject(WellKnownClasses::java_lang_StackOverflowError)); - if (exc.get() != nullptr) { - // "Initialize". - // StackOverflowError -> VirtualMachineError -> Error -> Throwable -> Object. - // Only Throwable has "custom" fields: - // String detailMessage. - // Throwable cause (= this). - // List<Throwable> suppressedExceptions (= Collections.emptyList()). - // Object stackState; - // StackTraceElement[] stackTrace; - // Only Throwable has a non-empty constructor: - // this.stackTrace = EmptyArray.STACK_TRACE_ELEMENT; - // fillInStackTrace(); - - // detailMessage. - // TODO: Use String::FromModifiedUTF...? - ScopedLocalRef<jstring> s(env, env->NewStringUTF(msg.c_str())); - if (s.get() != nullptr) { - env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_detailMessage, s.get()); - - // cause. - env->SetObjectField(exc.get(), WellKnownClasses::java_lang_Throwable_cause, exc.get()); - - // suppressedExceptions. - ScopedLocalRef<jobject> emptylist(env, env->GetStaticObjectField( - WellKnownClasses::java_util_Collections, - WellKnownClasses::java_util_Collections_EMPTY_LIST)); - CHECK(emptylist.get() != nullptr); - env->SetObjectField(exc.get(), - WellKnownClasses::java_lang_Throwable_suppressedExceptions, - emptylist.get()); - - // stackState is set as result of fillInStackTrace. fillInStackTrace calls - // nativeFillInStackTrace. 
- ScopedLocalRef<jobject> stack_state_val(env, nullptr); - { - ScopedObjectAccessUnchecked soa(env); - stack_state_val.reset(soa.Self()->CreateInternalStackTrace<false>(soa)); - } - if (stack_state_val.get() != nullptr) { - env->SetObjectField(exc.get(), - WellKnownClasses::java_lang_Throwable_stackState, - stack_state_val.get()); - - // stackTrace. - ScopedLocalRef<jobject> stack_trace_elem(env, env->GetStaticObjectField( - WellKnownClasses::libcore_util_EmptyArray, - WellKnownClasses::libcore_util_EmptyArray_STACK_TRACE_ELEMENT)); - env->SetObjectField(exc.get(), - WellKnownClasses::java_lang_Throwable_stackTrace, - stack_trace_elem.get()); - } else { - error_msg = "Could not create stack trace."; - } - // Throw the exception. - self->SetException(reinterpret_cast<mirror::Throwable*>(self->DecodeJObject(exc.get()))); - } else { - // Could not allocate a string object. - error_msg = "Couldn't throw new StackOverflowError because JNI NewStringUTF failed."; - } - } else { - error_msg = "Could not allocate StackOverflowError object."; - } - - if (!error_msg.empty()) { - LOG(WARNING) << error_msg; - CHECK(self->IsExceptionPending()); - } - - bool explicit_overflow_check = Runtime::Current()->ExplicitStackOverflowChecks(); - self->ResetDefaultStackEnd(); // Return to default stack size. - - // And restore protection if implicit checks are on. - if (!explicit_overflow_check) { - self->ProtectStack(); - } -} - void CheckReferenceResult(mirror::Object* o, Thread* self) { if (o == nullptr) { return; diff --git a/runtime/entrypoints/entrypoint_utils.h b/runtime/entrypoints/entrypoint_utils.h index 0469ee6eb6..a28376fadf 100644 --- a/runtime/entrypoints/entrypoint_utils.h +++ b/runtime/entrypoints/entrypoint_utils.h @@ -158,8 +158,6 @@ inline mirror::Class* ResolveVerifyAndClinit( uint32_t type_idx, ArtMethod* referrer, Thread* self, bool can_run_clinit, bool verify_access) SHARED_REQUIRES(Locks::mutator_lock_); -extern void ThrowStackOverflowError(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); - inline mirror::String* ResolveStringFromCode(ArtMethod* referrer, uint32_t string_idx) SHARED_REQUIRES(Locks::mutator_lock_); diff --git a/runtime/entrypoints/quick/quick_throw_entrypoints.cc b/runtime/entrypoints/quick/quick_throw_entrypoints.cc index 5a82b3ae2e..5256feae2b 100644 --- a/runtime/entrypoints/quick/quick_throw_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_throw_entrypoints.cc @@ -16,7 +16,6 @@ #include "callee_save_frame.h" #include "common_throws.h" -#include "entrypoints/entrypoint_utils-inl.h" #include "mirror/object-inl.h" #include "thread.h" #include "well_known_classes.h" diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index 7f67ae4f08..d6c1817f13 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -571,15 +571,19 @@ Heap::Heap(size_t initial_size, // Check that there's no gap between the image space and the non moving space so that the // immune region won't break (eg. due to a large object allocated in the gap). This is only // required when we're the zygote or using GSS. - /* TODO: Modify this check to support multi-images. b/26317072 - bool no_gap = MemMap::CheckNoGaps(GetBootImageSpace()->GetMemMap(), - non_moving_space_->GetMemMap()); + // Space with smallest Begin(). 
+ space::ImageSpace* first_space = nullptr; + for (space::ImageSpace* space : boot_image_spaces_) { + if (first_space == nullptr || space->Begin() < first_space->Begin()) { + first_space = space; + } + } + bool no_gap = MemMap::CheckNoGaps(first_space->GetMemMap(), non_moving_space_->GetMemMap()); if (!no_gap) { PrintFileToLog("/proc/self/maps", LogSeverity::ERROR); MemMap::DumpMaps(LOG(ERROR), true); LOG(FATAL) << "There's a gap between the image space and the non-moving space"; } - */ } instrumentation::Instrumentation* const instrumentation = runtime->GetInstrumentation(); if (gc_stress_mode_) { @@ -2333,7 +2337,7 @@ void Heap::PreZygoteFork() { if (HasZygoteSpace()) { return; } - Runtime::Current()->GetInternTable()->SwapPostZygoteWithPreZygote(); + Runtime::Current()->GetInternTable()->AddNewTable(); Runtime::Current()->GetClassLinker()->MoveClassTableToPreZygote(); VLOG(heap) << "Starting PreZygoteFork"; // Trim the pages at the end of the non moving space. diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index dfdbd04427..5f6bb8ee4b 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -47,13 +47,15 @@ ImageSpace::ImageSpace(const std::string& image_filename, const char* image_location, MemMap* mem_map, accounting::ContinuousSpaceBitmap* live_bitmap, - uint8_t* end, - MemMap* shadow_map) - : MemMapSpace(image_filename, mem_map, mem_map->Begin(), end, end, + uint8_t* end) + : MemMapSpace(image_filename, + mem_map, + mem_map->Begin(), + end, + end, kGcRetentionPolicyNeverCollect), oat_file_non_owned_(nullptr), - image_location_(image_location), - shadow_map_(shadow_map) { + image_location_(image_location) { DCHECK(live_bitmap != nullptr); live_bitmap_.reset(live_bitmap); } @@ -800,54 +802,19 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat uint32_t bitmap_index = bitmap_index_.FetchAndAddSequentiallyConsistent(1); std::string bitmap_name(StringPrintf("imagespace %s live-bitmap %u", image_filename, bitmap_index)); + // Bitmap only needs to cover until the end of the mirror objects section. + const ImageSection& image_objects = image_header.GetImageSection(ImageHeader::kSectionObjects); std::unique_ptr<accounting::ContinuousSpaceBitmap> bitmap( accounting::ContinuousSpaceBitmap::CreateFromMemMap( bitmap_name, image_bitmap_map.release(), reinterpret_cast<uint8_t*>(map->Begin()), - accounting::ContinuousSpaceBitmap::ComputeHeapSize(bitmap_section.Size()))); + image_objects.End())); if (bitmap == nullptr) { *error_msg = StringPrintf("Could not create bitmap '%s'", bitmap_name.c_str()); return nullptr; } - // In case of multi-images, the images are spaced apart so that the bitmaps don't overlap. We - // need to reserve the slack, as otherwise the large object space might allocate in there. - // TODO: Reconsider the multi-image layout. b/26317072 - std::unique_ptr<MemMap> shadow_map; - { - uintptr_t image_begin = reinterpret_cast<uintptr_t>(image_header.GetImageBegin()); - uintptr_t image_end = RoundUp(image_begin + image_header.GetImageSize(), kPageSize); - uintptr_t oat_begin = reinterpret_cast<uintptr_t>(image_header.GetOatFileBegin()); - if (image_end < oat_begin) { - // There's a gap. Could be multi-image, could be the oat file spaced apart. Go ahead and - // dummy-reserve the space covered by the bitmap (which will be a shadow that introduces - // a gap to the next image). 
- uintptr_t heap_size = bitmap->HeapSize(); - uintptr_t bitmap_coverage_end = RoundUp(image_begin + heap_size, kPageSize); - if (bitmap_coverage_end > image_end) { - VLOG(startup) << "Reserving bitmap shadow [" - << std::hex << image_end << ";" - << std::hex << bitmap_coverage_end << ";] (oat file begins at " - << std::hex << oat_begin; - // Note: we cannot use MemMap::Dummy here, as that won't reserve the space in 32-bit mode. - shadow_map.reset(MemMap::MapAnonymous("Image bitmap shadow", - reinterpret_cast<uint8_t*>(image_end), - bitmap_coverage_end - image_end, - PROT_NONE, - false, - false, - error_msg)); - if (shadow_map == nullptr) { - return nullptr; - } - // madvise it away, we don't really want it, just reserve the address space. - // TODO: Should we use MadviseDontNeedAndZero? b/26317072 - madvise(shadow_map->BaseBegin(), shadow_map->BaseSize(), MADV_DONTNEED); - } - } - } - // We only want the mirror object, not the ArtFields and ArtMethods. uint8_t* const image_end = map->Begin() + image_header.GetImageSection(ImageHeader::kSectionObjects).End(); @@ -855,8 +822,7 @@ ImageSpace* ImageSpace::Init(const char* image_filename, const char* image_locat image_location, map.release(), bitmap.release(), - image_end, - shadow_map.release())); + image_end)); // VerifyImageAllocations() will be called later in Runtime::Init() // as some class roots like ArtMethod::java_lang_reflect_ArtMethod_ diff --git a/runtime/gc/space/image_space.h b/runtime/gc/space/image_space.h index b8ae4a033a..9c8e8b2014 100644 --- a/runtime/gc/space/image_space.h +++ b/runtime/gc/space/image_space.h @@ -171,8 +171,7 @@ class ImageSpace : public MemMapSpace { const char* image_location, MemMap* mem_map, accounting::ContinuousSpaceBitmap* live_bitmap, - uint8_t* end, - MemMap* shadow_map = nullptr); + uint8_t* end); // The OatFile associated with the image during early startup to // reserve space contiguous to the image. It is later released to @@ -185,10 +184,6 @@ class ImageSpace : public MemMapSpace { const std::string image_location_; - // A MemMap reserving the space of the bitmap "shadow," so that we don't allocate into it. Only - // used in the multi-image case. 
- std::unique_ptr<MemMap> shadow_map_; - private: DISALLOW_COPY_AND_ASSIGN(ImageSpace); }; diff --git a/runtime/image.cc b/runtime/image.cc index 3856787e2f..3cb66428fa 100644 --- a/runtime/image.cc +++ b/runtime/image.cc @@ -24,7 +24,7 @@ namespace art { const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' }; -const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '4', '\0' }; +const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '5', '\0' }; ImageHeader::ImageHeader(uint32_t image_begin, uint32_t image_size, diff --git a/runtime/intern_table.cc b/runtime/intern_table.cc index d035f5d960..015bf98e38 100644 --- a/runtime/intern_table.cc +++ b/runtime/intern_table.cc @@ -32,7 +32,8 @@ namespace art { InternTable::InternTable() - : image_added_to_intern_table_(false), log_new_roots_(false), + : images_added_to_intern_table_(false), + log_new_roots_(false), weak_intern_condition_("New intern condition", *Locks::intern_table_lock_), weak_root_state_(gc::kWeakRootStateNormal) { } @@ -93,10 +94,10 @@ mirror::String* InternTable::LookupWeak(mirror::String* s) { return weak_interns_.Find(s); } -void InternTable::SwapPostZygoteWithPreZygote() { +void InternTable::AddNewTable() { MutexLock mu(Thread::Current(), *Locks::intern_table_lock_); - weak_interns_.SwapPostZygoteWithPreZygote(); - strong_interns_.SwapPostZygoteWithPreZygote(); + weak_interns_.AddNewTable(); + strong_interns_.AddNewTable(); } mirror::String* InternTable::InsertStrong(mirror::String* s) { @@ -150,15 +151,14 @@ void InternTable::RemoveWeakFromTransaction(mirror::String* s) { RemoveWeak(s); } -void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) { - CHECK(image_space != nullptr); +void InternTable::AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces) { MutexLock mu(Thread::Current(), *Locks::intern_table_lock_); - if (!image_added_to_intern_table_) { + for (gc::space::ImageSpace* image_space : image_spaces) { const ImageHeader* const header = &image_space->GetImageHeader(); // Check if we have the interned strings section. const ImageSection& section = header->GetImageSection(ImageHeader::kSectionInternedStrings); if (section.Size() > 0) { - ReadFromMemoryLocked(image_space->Begin() + section.Offset()); + AddTableFromMemoryLocked(image_space->Begin() + section.Offset()); } else { // TODO: Delete this logic? mirror::Object* root = header->GetImageRoot(ImageHeader::kDexCaches); @@ -179,15 +179,13 @@ void InternTable::AddImageStringsToTable(gc::space::ImageSpace* image_space) { } } } - image_added_to_intern_table_ = true; } + images_added_to_intern_table_ = true; } mirror::String* InternTable::LookupStringFromImage(mirror::String* s) { - if (image_added_to_intern_table_) { - return nullptr; - } - std::vector<gc::space::ImageSpace*> image_spaces = + DCHECK(!images_added_to_intern_table_); + const std::vector<gc::space::ImageSpace*>& image_spaces = Runtime::Current()->GetHeap()->GetBootImageSpaces(); if (image_spaces.empty()) { return nullptr; // No image present. @@ -284,9 +282,11 @@ mirror::String* InternTable::Insert(mirror::String* s, bool is_strong, bool hold return weak; } // Check the image for a match. - mirror::String* image = LookupStringFromImage(s); - if (image != nullptr) { - return is_strong ? InsertStrong(image) : InsertWeak(image); + if (!images_added_to_intern_table_) { + mirror::String* const image_string = LookupStringFromImage(s); + if (image_string != nullptr) { + return is_strong ? 
InsertStrong(image_string) : InsertWeak(image_string); + } } // No match in the strong table or the weak table. Insert into the strong / weak table. return is_strong ? InsertStrong(s) : InsertWeak(s); @@ -326,27 +326,18 @@ void InternTable::SweepInternTableWeaks(IsMarkedVisitor* visitor) { weak_interns_.SweepWeaks(visitor); } -void InternTable::AddImageInternTable(gc::space::ImageSpace* image_space) { - const ImageSection& intern_section = image_space->GetImageHeader().GetImageSection( - ImageHeader::kSectionInternedStrings); - // Read the string tables from the image. - const uint8_t* ptr = image_space->Begin() + intern_section.Offset(); - const size_t offset = ReadFromMemory(ptr); - CHECK_LE(offset, intern_section.Size()); -} - -size_t InternTable::ReadFromMemory(const uint8_t* ptr) { +size_t InternTable::AddTableFromMemory(const uint8_t* ptr) { MutexLock mu(Thread::Current(), *Locks::intern_table_lock_); - return ReadFromMemoryLocked(ptr); + return AddTableFromMemoryLocked(ptr); } -size_t InternTable::ReadFromMemoryLocked(const uint8_t* ptr) { - return strong_interns_.ReadIntoPreZygoteTable(ptr); +size_t InternTable::AddTableFromMemoryLocked(const uint8_t* ptr) { + return strong_interns_.AddTableFromMemory(ptr); } size_t InternTable::WriteToMemory(uint8_t* ptr) { MutexLock mu(Thread::Current(), *Locks::intern_table_lock_); - return strong_interns_.WriteFromPostZygoteTable(ptr); + return strong_interns_.WriteToMemory(ptr); } std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& root) const { @@ -364,71 +355,87 @@ bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a, return a.Read()->Equals(b.Read()); } -size_t InternTable::Table::ReadIntoPreZygoteTable(const uint8_t* ptr) { - CHECK_EQ(pre_zygote_table_.Size(), 0u); +size_t InternTable::Table::AddTableFromMemory(const uint8_t* ptr) { size_t read_count = 0; - pre_zygote_table_ = UnorderedSet(ptr, false /* make copy */, &read_count); + UnorderedSet set(ptr, /*make copy*/false, &read_count); + // TODO: Disable this for app images if app images have intern tables. + static constexpr bool kCheckDuplicates = kIsDebugBuild; + if (kCheckDuplicates) { + for (GcRoot<mirror::String>& string : set) { + CHECK(Find(string.Read()) == nullptr) << "Already found " << string.Read()->ToModifiedUtf8(); + } + } + // Insert at the front since new interns are added at the back.
+ tables_.insert(tables_.begin(), std::move(set)); return read_count; } -size_t InternTable::Table::WriteFromPostZygoteTable(uint8_t* ptr) { - return post_zygote_table_.WriteToMemory(ptr); +size_t InternTable::Table::WriteToMemory(uint8_t* ptr) { + if (tables_.empty()) { + return 0; + } + UnorderedSet* table_to_write; + UnorderedSet combined; + if (tables_.size() > 1) { + table_to_write = &combined; + for (UnorderedSet& table : tables_) { + for (GcRoot<mirror::String>& string : table) { + combined.Insert(string); + } + } + } else { + table_to_write = &tables_.back(); + } + return table_to_write->WriteToMemory(ptr); } void InternTable::Table::Remove(mirror::String* s) { - auto it = post_zygote_table_.Find(GcRoot<mirror::String>(s)); - if (it != post_zygote_table_.end()) { - post_zygote_table_.Erase(it); - } else { - it = pre_zygote_table_.Find(GcRoot<mirror::String>(s)); - DCHECK(it != pre_zygote_table_.end()); - pre_zygote_table_.Erase(it); + for (UnorderedSet& table : tables_) { + auto it = table.Find(GcRoot<mirror::String>(s)); + if (it != table.end()) { + table.Erase(it); + return; + } } + LOG(FATAL) << "Attempting to remove non-interned string " << s->ToModifiedUtf8(); } mirror::String* InternTable::Table::Find(mirror::String* s) { Locks::intern_table_lock_->AssertHeld(Thread::Current()); - auto it = pre_zygote_table_.Find(GcRoot<mirror::String>(s)); - if (it != pre_zygote_table_.end()) { - return it->Read(); - } - it = post_zygote_table_.Find(GcRoot<mirror::String>(s)); - if (it != post_zygote_table_.end()) { - return it->Read(); + for (UnorderedSet& table : tables_) { + auto it = table.Find(GcRoot<mirror::String>(s)); + if (it != table.end()) { + return it->Read(); + } } return nullptr; } -void InternTable::Table::SwapPostZygoteWithPreZygote() { - if (pre_zygote_table_.Empty()) { - std::swap(pre_zygote_table_, post_zygote_table_); - VLOG(heap) << "Swapping " << pre_zygote_table_.Size() << " interns to the pre zygote table"; - } else { - // This case happens if read the intern table from the image. - VLOG(heap) << "Not swapping due to non-empty pre_zygote_table_"; - } +void InternTable::Table::AddNewTable() { + tables_.push_back(UnorderedSet()); } void InternTable::Table::Insert(mirror::String* s) { - // Always insert the post zygote table, this gets swapped when we create the zygote to be the - // pre zygote table. - post_zygote_table_.Insert(GcRoot<mirror::String>(s)); + // Always insert into the last table; the image tables come before it and we avoid inserting + // into these to prevent dirty pages.
+ DCHECK(!tables_.empty()); + tables_.back().Insert(GcRoot<mirror::String>(s)); } void InternTable::Table::VisitRoots(RootVisitor* visitor) { BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor( visitor, RootInfo(kRootInternedString)); - for (auto& intern : pre_zygote_table_) { - buffered_visitor.VisitRoot(intern); - } - for (auto& intern : post_zygote_table_) { - buffered_visitor.VisitRoot(intern); + for (UnorderedSet& table : tables_) { + for (auto& intern : table) { + buffered_visitor.VisitRoot(intern); + } } } void InternTable::Table::SweepWeaks(IsMarkedVisitor* visitor) { - SweepWeaks(&pre_zygote_table_, visitor); - SweepWeaks(&post_zygote_table_, visitor); + for (UnorderedSet& table : tables_) { + SweepWeaks(&table, visitor); + } } void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) { @@ -446,7 +453,12 @@ void InternTable::Table::SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) } size_t InternTable::Table::Size() const { - return pre_zygote_table_.Size() + post_zygote_table_.Size(); + return std::accumulate(tables_.begin(), + tables_.end(), + 0U, + [](size_t sum, const UnorderedSet& set) { + return sum + set.Size(); + }); } void InternTable::ChangeWeakRootState(gc::WeakRootState new_state) { @@ -464,10 +476,10 @@ void InternTable::ChangeWeakRootStateLocked(gc::WeakRootState new_state) { InternTable::Table::Table() { Runtime* const runtime = Runtime::Current(); - pre_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(), - runtime->GetHashTableMaxLoadFactor()); - post_zygote_table_.SetLoadFactor(runtime->GetHashTableMinLoadFactor(), - runtime->GetHashTableMaxLoadFactor()); + // Initial table. + tables_.push_back(UnorderedSet()); + tables_.back().SetLoadFactor(runtime->GetHashTableMinLoadFactor(), + runtime->GetHashTableMaxLoadFactor()); } } // namespace art diff --git a/runtime/intern_table.h b/runtime/intern_table.h index 3a4e8d8f11..8f715a3dc3 100644 --- a/runtime/intern_table.h +++ b/runtime/intern_table.h @@ -98,22 +98,20 @@ class InternTable { void BroadcastForNewInterns() SHARED_REQUIRES(Locks::mutator_lock_); - // Adds all of the resolved image strings from the image space into the intern table. The - // advantage of doing this is preventing expensive DexFile::FindStringId calls. - void AddImageStringsToTable(gc::space::ImageSpace* image_space) + // Adds all of the resolved image strings from the image spaces into the intern table. The + // advantage of doing this is preventing expensive DexFile::FindStringId calls. Sets + // images_added_to_intern_table_ to true. + void AddImagesStringsToTable(const std::vector<gc::space::ImageSpace*>& image_spaces) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_); - // Copy the post zygote tables to pre zygote to save memory by preventing dirty pages. - void SwapPostZygoteWithPreZygote() - SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_); - - // Add an intern table which was serialized to the image. - void AddImageInternTable(gc::space::ImageSpace* image_space) + // Add a new intern table for inserting to, previous intern tables are still there but no + // longer inserted into and ideally unmodified. This is done to prevent dirty pages. + void AddNewTable() SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(!Locks::intern_table_lock_); // Read the intern table from memory. The elements aren't copied, the intern hash set data will // point to somewhere within ptr. Only reads the strong interns. 
- size_t ReadFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_) + size_t AddTableFromMemory(const uint8_t* ptr) REQUIRES(!Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Write the post zygote intern table to a pointer. Only writes the strong interns since it is @@ -157,15 +155,17 @@ class InternTable { SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); void SweepWeaks(IsMarkedVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); - void SwapPostZygoteWithPreZygote() REQUIRES(Locks::intern_table_lock_); + // Add a new intern table that will only be inserted into from now on. + void AddNewTable() REQUIRES(Locks::intern_table_lock_); size_t Size() const REQUIRES(Locks::intern_table_lock_); - // Read pre zygote table is called from ReadFromMemory which happens during runtime creation - // when we load the image intern table. Returns how many bytes were read. - size_t ReadIntoPreZygoteTable(const uint8_t* ptr) + // Read and add an intern table from ptr. + // Tables read are inserted at the front of the table array. Only checks for conflicts in + // debug builds. Returns how many bytes were read. + size_t AddTableFromMemory(const uint8_t* ptr) REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_); - // The image writer calls WritePostZygoteTable through WriteToMemory, it writes the interns in - // the post zygote table. Returns how many bytes were written. - size_t WriteFromPostZygoteTable(uint8_t* ptr) + // Write the intern tables to ptr, if there are multiple tables they are combined into a single + // one. Returns how many bytes were written. + size_t WriteToMemory(uint8_t* ptr) REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_); private: @@ -175,12 +175,9 @@ class InternTable { void SweepWeaks(UnorderedSet* set, IsMarkedVisitor* visitor) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); - // We call SwapPostZygoteWithPreZygote when we create the zygote to reduce private dirty pages - // caused by modifying the zygote intern table hash table. The pre zygote table are the - // interned strings which were interned before we created the zygote space. Post zygote is self - // explanatory. - UnorderedSet pre_zygote_table_; - UnorderedSet post_zygote_table_; + // We call AddNewTable when we create the zygote to reduce private dirty pages caused by + // modifying the zygote intern table. The back of table is modified when strings are interned. + std::vector<UnorderedSet> tables_; }; // Insert if non null, otherwise return null. Must be called holding the mutator lock. @@ -214,7 +211,7 @@ class InternTable { void RemoveWeakFromTransaction(mirror::String* s) SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::intern_table_lock_); - size_t ReadFromMemoryLocked(const uint8_t* ptr) + size_t AddTableFromMemoryLocked(const uint8_t* ptr) REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_); // Change the weak root state. May broadcast to waiters. 
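Per the new header contract, tables deserialized from an image are spliced in at the front of the array, while WriteToMemory merges however many tables exist into one combined set, so an image never carries more than a single table. A hedged sketch of the write-side flattening, with simplified element types:

    #include <string>
    #include <unordered_set>
    #include <vector>

    // Merge every live table into one set; the real code then serializes this
    // single set via UnorderedSet::WriteToMemory.
    std::unordered_set<std::string> CombineForWrite(
        const std::vector<std::unordered_set<std::string>>& tables) {
      std::unordered_set<std::string> combined;
      for (const auto& table : tables) {
        combined.insert(table.begin(), table.end());
      }
      return combined;
    }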
@@ -225,7 +222,7 @@ class InternTable { void WaitUntilAccessible(Thread* self) REQUIRES(Locks::intern_table_lock_) SHARED_REQUIRES(Locks::mutator_lock_); - bool image_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_); + bool images_added_to_intern_table_ GUARDED_BY(Locks::intern_table_lock_); bool log_new_roots_ GUARDED_BY(Locks::intern_table_lock_); ConditionVariable weak_intern_condition_ GUARDED_BY(Locks::intern_table_lock_); // Since this contains (strong) roots, they need a read barrier to diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 871fad7b80..8d5a61a44b 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -18,6 +18,7 @@ #include <limits> +#include "common_throws.h" #include "interpreter_common.h" #include "mirror/string-inl.h" #include "scoped_thread_state_change.h" diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc index 08eac0ec20..c260ca4629 100644 --- a/runtime/jit/jit_code_cache.cc +++ b/runtime/jit/jit_code_cache.cc @@ -668,6 +668,11 @@ ProfilingInfo* JitCodeCache::AddProfilingInfoInternal(Thread* self, return nullptr; } info = new (data) ProfilingInfo(method, entries); + + // Make sure other threads see the data in the profiling info object before the + // store in the ArtMethod's ProfilingInfo pointer. + QuasiAtomic::ThreadFenceRelease(); + method->SetProfilingInfo(info); profiling_infos_.push_back(info); return info; diff --git a/runtime/jit/offline_profiling_info.cc b/runtime/jit/offline_profiling_info.cc index 5dc0e45234..a132701796 100644 --- a/runtime/jit/offline_profiling_info.cc +++ b/runtime/jit/offline_profiling_info.cc @@ -30,38 +30,40 @@ namespace art { -void OfflineProfilingInfo::SaveProfilingInfo(const std::string& filename, - const std::vector<ArtMethod*>& methods) { +bool ProfileCompilationInfo::SaveProfilingInfo(const std::string& filename, + const std::vector<ArtMethod*>& methods) { if (methods.empty()) { VLOG(profiler) << "No info to save to " << filename; - return; + return true; } - DexFileToMethodsMap info; + ProfileCompilationInfo info; + if (!info.Load(filename)) { + LOG(WARNING) << "Could not load previous profile data from file " << filename; + return false; + } { ScopedObjectAccess soa(Thread::Current()); for (auto it = methods.begin(); it != methods.end(); it++) { - AddMethodInfo(*it, &info); + const DexFile* dex_file = (*it)->GetDexFile(); + if (!info.AddData(dex_file->GetLocation(), + dex_file->GetLocationChecksum(), + (*it)->GetDexMethodIndex())) { + return false; + } } } // This doesn't need locking because we are trying to lock the file for exclusive // access and fail immediately if we can't. 
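The jit_code_cache.cc hunk above is a release-publication pattern: the ProfilingInfo object is fully constructed, a release fence runs, and only then is the pointer stored where other threads can load it. In portable C++ the same shape looks roughly like this (a sketch; ART's QuasiAtomic::ThreadFenceRelease plays the role of the release fence):

    #include <atomic>

    struct ProfilingData {
      int entries = 0;  // stands in for the ProfilingInfo payload
    };

    std::atomic<ProfilingData*> g_published{nullptr};

    void Publish(ProfilingData* data) {
      data->entries = 42;  // plain writes while the object is still thread-private
      // Everything written above becomes visible before the pointer store below.
      std::atomic_thread_fence(std::memory_order_release);
      g_published.store(data, std::memory_order_relaxed);
    }

    ProfilingData* Acquire() {
      ProfilingData* p = g_published.load(std::memory_order_relaxed);
      if (p != nullptr) {
        // Pairs with the release fence above; p->entries is now safe to read.
        std::atomic_thread_fence(std::memory_order_acquire);
      }
      return p;
    }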
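The offline_profiling_info.cc changes above also turn saving into a read-merge-write cycle: load whatever profile is already on disk, fold in the newly hot methods, and write the union back; O_CREAT is dropped from the open flags because the framework is now expected to pre-create the file. A sketch of that flow under simplified assumptions (hypothetical ProfileSketch type, string locations and method indices instead of ArtMethod*, checksums elided):

    #include <cstdint>
    #include <map>
    #include <set>
    #include <string>
    #include <utility>
    #include <vector>

    struct ProfileSketch {
      // dex location -> hot method indices.
      std::map<std::string, std::set<uint16_t>> methods;
      bool Load(const std::string& /*filename*/) { return true; }  // stub: parse on-disk profile
      bool Save(const std::string& /*filename*/) { return true; }  // stub: rewrite the file
    };

    bool SaveMerged(const std::string& filename,
                    const std::vector<std::pair<std::string, uint16_t>>& hot,
                    ProfileSketch* info) {
      if (!info->Load(filename)) {
        return false;  // mirrors the new "could not load previous profile" bail-out
      }
      for (const auto& method : hot) {
        info->methods[method.first].insert(method.second);  // merge the new samples
      }
      return info->Save(filename);  // persist the union
    }

The ordering is the point: the load must succeed before any merge, otherwise a transient read failure would silently discard the profile accumulated so far.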
- if (Serialize(filename, info)) { + bool result = info.Save(filename); + if (result) { VLOG(profiler) << "Successfully saved profile info to " << filename << " Size: " << GetFileSizeBytes(filename); + } else { + VLOG(profiler) << "Failed to save profile info to " << filename; } -} - -void OfflineProfilingInfo::AddMethodInfo(ArtMethod* method, DexFileToMethodsMap* info) { - DCHECK(method != nullptr); - const DexFile* dex_file = method->GetDexFile(); - - auto info_it = info->find(dex_file); - if (info_it == info->end()) { - info_it = info->Put(dex_file, std::set<uint32_t>()); - } - info_it->second.insert(method->GetDexMethodIndex()); + return result; } enum OpenMode { @@ -77,9 +79,7 @@ static int OpenFile(const std::string& filename, OpenMode open_mode) { break; case READ_WRITE: // TODO(calin) allow the shared uid of the app to access the file. - fd = open(filename.c_str(), - O_CREAT | O_WRONLY | O_TRUNC | O_NOFOLLOW | O_CLOEXEC, - S_IRUSR | S_IWUSR); + fd = open(filename.c_str(), O_WRONLY | O_TRUNC | O_NOFOLLOW | O_CLOEXEC); break; } @@ -137,8 +137,7 @@ static constexpr const char kLineSeparator = '\n'; * /system/priv-app/app/app.apk,131232145,11,23,454,54 * /system/priv-app/app/app.apk:classes5.dex,218490184,39,13,49,1 **/ -bool OfflineProfilingInfo::Serialize(const std::string& filename, - const DexFileToMethodsMap& info) const { +bool ProfileCompilationInfo::Save(const std::string& filename) { int fd = OpenFile(filename, READ_WRITE); if (fd == -1) { return false; @@ -148,14 +147,12 @@ bool OfflineProfilingInfo::Serialize(const std::string& filename, // TODO(calin): Profile this and see how much memory it takes. If too much, // write to file directly. std::ostringstream os; - for (auto it : info) { - const DexFile* dex_file = it.first; - const std::set<uint32_t>& method_dex_ids = it.second; - - os << dex_file->GetLocation() - << kFieldSeparator - << dex_file->GetLocationChecksum(); - for (auto method_it : method_dex_ids) { + for (const auto& it : info_) { + const std::string& dex_location = it.first; + const DexFileData& dex_data = it.second; + + os << dex_location << kFieldSeparator << dex_data.checksum; + for (auto method_it : dex_data.method_set) { os << kFieldSeparator << method_it; } os << kLineSeparator; @@ -190,8 +187,22 @@ static void SplitString(const std::string& s, char separator, std::vector<std::s } } -bool ProfileCompilationInfo::ProcessLine(const std::string& line, - const std::vector<const DexFile*>& dex_files) { +bool ProfileCompilationInfo::AddData(const std::string& dex_location, + uint32_t checksum, + uint16_t method_idx) { + auto info_it = info_.find(dex_location); + if (info_it == info_.end()) { + info_it = info_.Put(dex_location, DexFileData(checksum)); + } + if (info_it->second.checksum != checksum) { + LOG(WARNING) << "Checksum mismatch for dex " << dex_location; + return false; + } + info_it->second.method_set.insert(method_idx); + return true; +} + +bool ProfileCompilationInfo::ProcessLine(const std::string& line) { std::vector<std::string> parts; SplitString(line, kFieldSeparator, &parts); if (parts.size() < 3) { @@ -205,39 +216,13 @@ bool ProfileCompilationInfo::ProcessLine(const std::string& line, return false; } - const DexFile* current_dex_file = nullptr; - for (auto dex_file : dex_files) { - if (dex_file->GetLocation() == dex_location) { - if (checksum != dex_file->GetLocationChecksum()) { - LOG(WARNING) << "Checksum mismatch for " - << dex_file->GetLocation() << " when parsing " << filename_; - return false; - } - current_dex_file = dex_file; - break; - 
} - } - if (current_dex_file == nullptr) { - return true; - } - for (size_t i = 2; i < parts.size(); i++) { uint32_t method_idx; if (!ParseInt(parts[i].c_str(), &method_idx)) { LOG(WARNING) << "Cannot parse method_idx " << parts[i]; return false; } - uint16_t class_idx = current_dex_file->GetMethodId(method_idx).class_idx_; - auto info_it = info_.find(current_dex_file); - if (info_it == info_.end()) { - info_it = info_.Put(current_dex_file, ClassToMethodsMap()); - } - ClassToMethodsMap& class_map = info_it->second; - auto class_it = class_map.find(class_idx); - if (class_it == class_map.end()) { - class_it = class_map.Put(class_idx, std::set<uint32_t>()); - } - class_it->second.insert(method_idx); + AddData(dex_location, checksum, method_idx); } return true; } @@ -264,25 +249,8 @@ static int GetLineFromBuffer(char* buffer, int n, int start_from, std::string& l return new_line_pos == -1 ? new_line_pos : new_line_pos + 1; } -bool ProfileCompilationInfo::Load(const std::vector<const DexFile*>& dex_files) { - if (dex_files.empty()) { - return true; - } - if (kIsDebugBuild) { - // In debug builds verify that the locations are unique. - std::set<std::string> locations; - for (auto dex_file : dex_files) { - const std::string& location = dex_file->GetLocation(); - DCHECK(locations.find(location) == locations.end()) - << "DexFiles appear to belong to different apks." - << " There are multiple dex files with the same location: " - << location; - locations.insert(location); - } - } - info_.clear(); - - int fd = OpenFile(filename_, READ); +bool ProfileCompilationInfo::Load(const std::string& filename) { + int fd = OpenFile(filename, READ); if (fd == -1) { return false; } @@ -295,7 +263,7 @@ bool ProfileCompilationInfo::Load(const std::vector<const DexFile*>& dex_files) while (success) { int n = read(fd, buffer, kBufferSize); if (n < 0) { - PLOG(WARNING) << "Error when reading profile file " << filename_; + PLOG(WARNING) << "Error when reading profile file " << filename; success = false; break; } else if (n == 0) { @@ -309,7 +277,7 @@ bool ProfileCompilationInfo::Load(const std::vector<const DexFile*>& dex_files) if (current_start_pos == -1) { break; } - if (!ProcessLine(current_line, dex_files)) { + if (!ProcessLine(current_line)) { success = false; break; } @@ -320,25 +288,50 @@ bool ProfileCompilationInfo::Load(const std::vector<const DexFile*>& dex_files) if (!success) { info_.clear(); } - return CloseDescriptorForFile(fd, filename_) && success; + return CloseDescriptorForFile(fd, filename) && success; +} + +bool ProfileCompilationInfo::Load(const ProfileCompilationInfo& other) { + for (const auto& other_it : other.info_) { + const std::string& other_dex_location = other_it.first; + const DexFileData& other_dex_data = other_it.second; + + auto info_it = info_.find(other_dex_location); + if (info_it == info_.end()) { + info_it = info_.Put(other_dex_location, DexFileData(other_dex_data.checksum)); + } + if (info_it->second.checksum != other_dex_data.checksum) { + LOG(WARNING) << "Checksum mismatch for dex " << other_dex_location; + return false; + } + info_it->second.method_set.insert(other_dex_data.method_set.begin(), + other_dex_data.method_set.end()); + } + return true; } bool ProfileCompilationInfo::ContainsMethod(const MethodReference& method_ref) const { - auto info_it = info_.find(method_ref.dex_file); + auto info_it = info_.find(method_ref.dex_file->GetLocation()); if (info_it != info_.end()) { - uint16_t class_idx = method_ref.dex_file->GetMethodId(method_ref.dex_method_index).class_idx_; - 
const ClassToMethodsMap& class_map = info_it->second; - auto class_it = class_map.find(class_idx); - if (class_it != class_map.end()) { - const std::set<uint32_t>& methods = class_it->second; - return methods.find(method_ref.dex_method_index) != methods.end(); + if (method_ref.dex_file->GetLocationChecksum() != info_it->second.checksum) { + return false; } - return false; + const std::set<uint16_t>& methods = info_it->second.method_set; + return methods.find(method_ref.dex_method_index) != methods.end(); } return false; } -std::string ProfileCompilationInfo::DumpInfo(bool print_full_dex_location) const { +uint32_t ProfileCompilationInfo::GetNumberOfMethods() const { + uint32_t total = 0; + for (const auto& it : info_) { + total += it.second.method_set.size(); + } + return total; +} + +std::string ProfileCompilationInfo::DumpInfo(const std::vector<const DexFile*>* dex_files, + bool print_full_dex_location) const { std::ostringstream os; if (info_.empty()) { return "ProfileInfo: empty"; @@ -346,17 +339,11 @@ std::string ProfileCompilationInfo::DumpInfo(bool print_full_dex_location) const os << "ProfileInfo:"; - // Use an additional map to achieve a predefined order based on the dex locations. - SafeMap<const std::string, const DexFile*> dex_locations_map; - for (auto info_it : info_) { - dex_locations_map.Put(info_it.first->GetLocation(), info_it.first); - } - const std::string kFirstDexFileKeySubstitute = ":classes.dex"; - for (auto dex_file_it : dex_locations_map) { + for (const auto& it : info_) { os << "\n"; - const std::string& location = dex_file_it.first; - const DexFile* dex_file = dex_file_it.second; + const std::string& location = it.first; + const DexFileData& dex_data = it.second; if (print_full_dex_location) { os << location; } else { @@ -364,10 +351,19 @@ std::string ProfileCompilationInfo::DumpInfo(bool print_full_dex_location) const std::string multidex_suffix = DexFile::GetMultiDexSuffix(location); os << (multidex_suffix.empty() ? kFirstDexFileKeySubstitute : multidex_suffix); } - for (auto class_it : info_.find(dex_file)->second) { - for (auto method_it : class_it.second) { - os << "\n " << PrettyMethod(method_it, *dex_file, true); + for (const auto method_it : dex_data.method_set) { + if (dex_files != nullptr) { + const DexFile* dex_file = nullptr; + for (size_t i = 0; i < dex_files->size(); i++) { + if (location == (*dex_files)[i]->GetLocation()) { + dex_file = (*dex_files)[i]; + } + } + if (dex_file != nullptr) { + os << "\n " << PrettyMethod(method_it, *dex_file, true); + } } + os << "\n " << method_it; } } return os.str(); diff --git a/runtime/jit/offline_profiling_info.h b/runtime/jit/offline_profiling_info.h index 32d4c5bedc..26e1ac385f 100644 --- a/runtime/jit/offline_profiling_info.h +++ b/runtime/jit/offline_profiling_info.h @@ -29,60 +29,50 @@ namespace art { class ArtMethod; +// TODO: rename file. /** - * Profiling information in a format that can be serialized to disk. - * It is a serialize-friendly format based on information collected - * by the interpreter (ProfileInfo). + * Profile information in a format suitable to be queried by the compiler when + * performing profile-guided compilation. + * It is a serialize-friendly format based on information collected by the + * interpreter (ProfileInfo). * Currently it stores only the hot compiled methods. */ -class OfflineProfilingInfo { - public: - void SaveProfilingInfo(const std::string& filename, const std::vector<ArtMethod*>& methods); - - private: - // Map identifying the location of the profiled methods. 
- // dex_file_ -> [dex_method_index]+ - using DexFileToMethodsMap = SafeMap<const DexFile*, std::set<uint32_t>>; - - void AddMethodInfo(ArtMethod* method, DexFileToMethodsMap* info) - SHARED_REQUIRES(Locks::mutator_lock_); - bool Serialize(const std::string& filename, const DexFileToMethodsMap& info) const; -}; - -/** - * Profile information in a format suitable to be queried by the compiler and performing - * profile guided compilation. - */ class ProfileCompilationInfo { public: - // Constructs a ProfileCompilationInfo backed by the provided file. - explicit ProfileCompilationInfo(const std::string& filename) : filename_(filename) {} - - // Loads profile information corresponding to the provided dex files. - // The dex files' multidex suffixes must be unique. - // This resets the state of the profiling information - // (i.e. all previously loaded info are cleared). - bool Load(const std::vector<const DexFile*>& dex_files); + static bool SaveProfilingInfo(const std::string& filename, + const std::vector<ArtMethod*>& methods); + + // Loads profile information from the given file. + bool Load(const std::string& profile_filename); + // Loads the data from another ProfileCompilationInfo object. + bool Load(const ProfileCompilationInfo& info); + // Saves the profile data to the given file. + bool Save(const std::string& profile_filename); + // Returns the number of methods that were profiled. + uint32_t GetNumberOfMethods() const; // Returns true if the method reference is present in the profiling info. bool ContainsMethod(const MethodReference& method_ref) const; - const std::string& GetFilename() const { return filename_; } - // Dumps all the loaded profile info into a string and returns it. + // If dex_files is not null then the method indices will be resolved to their + // names. // This is intended for testing and debugging. - std::string DumpInfo(bool print_full_dex_location = true) const; + std::string DumpInfo(const std::vector<const DexFile*>* dex_files, + bool print_full_dex_location = true) const; private: - bool ProcessLine(const std::string& line, - const std::vector<const DexFile*>& dex_files); + bool AddData(const std::string& dex_location, uint32_t checksum, uint16_t method_idx); + bool ProcessLine(const std::string& line); + + struct DexFileData { + explicit DexFileData(uint32_t location_checksum) : checksum(location_checksum) {} + uint32_t checksum; + std::set<uint16_t> method_set; + }; - using ClassToMethodsMap = SafeMap<uint32_t, std::set<uint32_t>>; - // Map identifying the location of the profiled methods. - // dex_file -> class_index -> [dex_method_index]+ - using DexFileToProfileInfoMap = SafeMap<const DexFile*, ClassToMethodsMap>; + using DexFileToProfileInfoMap = SafeMap<const std::string, DexFileData>; - const std::string filename_; DexFileToProfileInfoMap info_; }; diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc index 0278138d6e..ec289ea2b5 100644 --- a/runtime/jit/profile_saver.cc +++ b/runtime/jit/profile_saver.cc @@ -106,10 +106,9 @@ bool ProfileSaver::ProcessProfilingInfo() { VLOG(profiler) << "Not enough information to save. 
Nr of methods: " << methods.size(); return false; } - offline_profiling_info_.SaveProfilingInfo(output_filename_, methods); - - VLOG(profiler) << "Saved profile time: " << PrettyDuration(NanoTime() - start); + ProfileCompilationInfo::SaveProfilingInfo(output_filename_, methods); + VLOG(profiler) << "Profile process time: " << PrettyDuration(NanoTime() - start); return true; } diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h index 88efd41156..d60142b205 100644 --- a/runtime/jit/profile_saver.h +++ b/runtime/jit/profile_saver.h @@ -66,7 +66,6 @@ class ProfileSaver { const std::string output_filename_; jit::JitCodeCache* jit_code_cache_; const std::set<const std::string> tracked_dex_base_locations_; - OfflineProfilingInfo offline_profiling_info_; uint64_t code_cache_last_update_time_ns_; bool shutting_down_ GUARDED_BY(Locks::profiler_lock_); diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc index aa25f67bab..1ee1611ef7 100644 --- a/runtime/jni_env_ext.cc +++ b/runtime/jni_env_ext.cc @@ -59,6 +59,7 @@ JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in) local_ref_cookie(IRT_FIRST_SEGMENT), locals(kLocalsInitial, kLocalsMax, kLocal, false), check_jni(false), + runtime_deleted(false), critical(0), monitors("monitors", kMonitorsInitial, kMonitorsMax) { functions = unchecked_functions = GetJniNativeInterface(); @@ -67,6 +68,11 @@ JNIEnvExt::JNIEnvExt(Thread* self_in, JavaVMExt* vm_in) } } +void JNIEnvExt::SetFunctionsToRuntimeShutdownFunctions() { + functions = GetRuntimeShutdownNativeInterface(); + runtime_deleted = true; +} + JNIEnvExt::~JNIEnvExt() { } diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h index 2f8decf98f..d4accc342b 100644 --- a/runtime/jni_env_ext.h +++ b/runtime/jni_env_ext.h @@ -74,6 +74,9 @@ struct JNIEnvExt : public JNIEnv { // Frequently-accessed fields cached from JavaVM. bool check_jni; + // If we are a JNI env for a daemon thread with a deleted runtime. + bool runtime_deleted; + // How many nested "critical" JNI calls are we in? int critical; @@ -95,6 +98,9 @@ struct JNIEnvExt : public JNIEnv { // Check that no monitors are held that have been acquired in this JNI "segment." void CheckNoHeldMonitors() SHARED_REQUIRES(Locks::mutator_lock_); + // Set the functions to the runtime shutdown functions. + void SetFunctionsToRuntimeShutdownFunctions(); + private: // The constructor should not be called directly. It may leave the object in an erronuous state, // and the result needs to be checked. diff --git a/runtime/jni_internal.cc b/runtime/jni_internal.cc index cb67ee3b39..c893a0fc9d 100644 --- a/runtime/jni_internal.cc +++ b/runtime/jni_internal.cc @@ -2734,6 +2734,246 @@ const JNINativeInterface* GetJniNativeInterface() { return &gJniNativeInterface; } +void (*gJniSleepForeverStub[])() = { + nullptr, // reserved0. + nullptr, // reserved1. + nullptr, // reserved2. + nullptr, // reserved3. 
+ SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + 
SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, + SleepForever, +}; + +const JNINativeInterface* GetRuntimeShutdownNativeInterface() { + return reinterpret_cast<JNINativeInterface*>(&gJniSleepForeverStub); +} + void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods, jint method_count) { ScopedLocalRef<jclass> c(env, env->FindClass(jni_class_name)); diff --git a/runtime/jni_internal.h b/runtime/jni_internal.h index 48b10f5825..3429962d3f 100644 --- a/runtime/jni_internal.h +++ b/runtime/jni_internal.h @@ -30,6 +30,7 @@ namespace art { const JNINativeInterface* GetJniNativeInterface(); +const JNINativeInterface* GetRuntimeShutdownNativeInterface(); // Similar to RegisterNatives except its passed a descriptor for a class name and failures are // fatal. diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc index 4b24f821cb..da4a891ff5 100644 --- a/runtime/native/dalvik_system_VMRuntime.cc +++ b/runtime/native/dalvik_system_VMRuntime.cc @@ -565,8 +565,8 @@ static void VMRuntime_preloadDexCaches(JNIEnv* env, jobject) { */ static void VMRuntime_registerAppInfo(JNIEnv* env, jclass clazz ATTRIBUTE_UNUSED, - jstring pkg_name, - jstring app_dir, + jstring profile_file, + jstring app_dir ATTRIBUTE_UNUSED, // TODO: remove argument jobjectArray code_paths) { std::vector<std::string> code_paths_vec; int code_paths_length = env->GetArrayLength(code_paths); @@ -577,13 +577,11 @@ static void VMRuntime_registerAppInfo(JNIEnv* env, env->ReleaseStringUTFChars(code_path, raw_code_path); } - const char* raw_app_dir = env->GetStringUTFChars(app_dir, nullptr); - const char* raw_pkg_name = env->GetStringUTFChars(pkg_name, nullptr); - std::string profile_file = StringPrintf("%s/code_cache/%s.prof", raw_app_dir, raw_pkg_name); - env->ReleaseStringUTFChars(pkg_name, raw_pkg_name); - env->ReleaseStringUTFChars(app_dir, raw_app_dir); + const char* raw_profile_file = env->GetStringUTFChars(profile_file, nullptr); + std::string profile_file_str(raw_profile_file); + env->ReleaseStringUTFChars(profile_file, raw_profile_file); - Runtime::Current()->RegisterAppInfo(code_paths_vec, profile_file); + Runtime::Current()->RegisterAppInfo(code_paths_vec, profile_file_str); } static jboolean VMRuntime_isBootClassPathOnDisk(JNIEnv* env, jclass, jstring java_instruction_set) { diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc index 19774811bc..e89c74da23 100644 --- a/runtime/native/java_lang_Class.cc +++ b/runtime/native/java_lang_Class.cc @@ -288,13 +288,6 @@ static jobject Class_getPublicFieldRecursive(JNIEnv* env, jobject javaThis, jstr GetPublicFieldRecursive(soa.Self(), DecodeClass(soa, javaThis), name_string)); } -static jobject Class_getDeclaredFieldInternal(JNIEnv* env, jobject javaThis, jstring name) { - ScopedFastNativeObjectAccess soa(env); - auto* name_string = soa.Decode<mirror::String*>(name); - return soa.AddLocalReference<jobject>( - GetDeclaredField(soa.Self(), DecodeClass(soa, javaThis), name_string)); -} - static jobject Class_getDeclaredField(JNIEnv* env, jobject javaThis, jstring name) { ScopedFastNativeObjectAccess soa(env); auto* name_string = soa.Decode<mirror::String*>(name); @@ -306,6 +299,12 @@ static jobject Class_getDeclaredField(JNIEnv* env, jobject javaThis, jstring nam mirror::Field* result = GetDeclaredField(soa.Self(), klass, name_string); if (result == nullptr) { std::string name_str = name_string->ToModifiedUtf8(); + if (name_str == 
"value" && klass->IsStringClass()) { + // We log the error for this specific case, as the user might just swallow the exception. + // This helps diagnose crashes when applications rely on the String#value field being + // there. + LOG(ERROR) << "The String#value field is not present on Android versions >= 6.0"; + } // We may have a pending exception if we failed to resolve. if (!soa.Self()->IsExceptionPending()) { ThrowNoSuchFieldException(DecodeClass(soa, javaThis), name_str.c_str()); @@ -723,7 +722,6 @@ static JNINativeMethod gMethods[] = { NATIVE_METHOD(Class, getDeclaredConstructorsInternal, "!(Z)[Ljava/lang/reflect/Constructor;"), NATIVE_METHOD(Class, getDeclaredField, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"), NATIVE_METHOD(Class, getPublicFieldRecursive, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"), - NATIVE_METHOD(Class, getDeclaredFieldInternal, "!(Ljava/lang/String;)Ljava/lang/reflect/Field;"), NATIVE_METHOD(Class, getDeclaredFields, "!()[Ljava/lang/reflect/Field;"), NATIVE_METHOD(Class, getDeclaredFieldsUnchecked, "!(Z)[Ljava/lang/reflect/Field;"), NATIVE_METHOD(Class, getDeclaredMethodInternal, diff --git a/runtime/native/java_lang_Runtime.cc b/runtime/native/java_lang_Runtime.cc index f42a17d538..c177f19b56 100644 --- a/runtime/native/java_lang_Runtime.cc +++ b/runtime/native/java_lang_Runtime.cc @@ -80,7 +80,7 @@ static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, job // Starting with N nativeLoad uses classloader local // linker namespace instead of global LD_LIBRARY_PATH // (23 is Marshmallow) - if (target_sdk_version <= INT_MAX) { + if (target_sdk_version == 0) { SetLdLibraryPath(env, javaLibrarySearchPath); } diff --git a/runtime/oat_file.cc b/runtime/oat_file.cc index e3de14b667..83e594b169 100644 --- a/runtime/oat_file.cc +++ b/runtime/oat_file.cc @@ -983,7 +983,6 @@ const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location, LOG(WARNING) << "Failed to find OatDexFile for DexFile " << dex_location << " ( canonical path " << dex_canonical_location << ")" << " with checksum " << checksum << " in OatFile " << GetLocation(); - /* TODO: Modify for multi-image support and reenable. b/26317072 if (kIsDebugBuild) { for (const OatDexFile* odf : oat_dex_files_storage_) { LOG(WARNING) << "OatFile " << GetLocation() @@ -992,7 +991,6 @@ const OatFile::OatDexFile* OatFile::GetOatDexFile(const char* dex_location, << " with checksum 0x" << std::hex << odf->GetDexFileLocationChecksum(); } } - */ } return nullptr; diff --git a/runtime/oat_file_assistant.cc b/runtime/oat_file_assistant.cc index 8543ff4ef9..d6b08684b9 100644 --- a/runtime/oat_file_assistant.cc +++ b/runtime/oat_file_assistant.cc @@ -952,7 +952,6 @@ const OatFileAssistant::ImageInfo* OatFileAssistant::GetImageInfo() { Runtime* runtime = Runtime::Current(); std::vector<gc::space::ImageSpace*> image_spaces = runtime->GetHeap()->GetBootImageSpaces(); if (!image_spaces.empty()) { - // TODO: Better support multi-images? 
b/26317072 cached_image_info_.location = image_spaces[0]->GetImageLocation(); if (isa_ == kRuntimeISA) { diff --git a/runtime/reflection.cc b/runtime/reflection.cc index 324bd9f580..28c27cd971 100644 --- a/runtime/reflection.cc +++ b/runtime/reflection.cc @@ -21,7 +21,6 @@ #include "class_linker.h" #include "common_throws.h" #include "dex_file-inl.h" -#include "entrypoints/entrypoint_utils.h" #include "indirect_reference_table-inl.h" #include "jni_internal.h" #include "mirror/abstract_method.h" diff --git a/runtime/runtime.cc b/runtime/runtime.cc index 5c72629320..6b8f17ddaa 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -214,6 +214,7 @@ Runtime::Runtime() } Runtime::~Runtime() { + ATRACE_BEGIN("Runtime shutdown"); if (is_native_bridge_loaded_) { UnloadNativeBridge(); } @@ -228,45 +229,55 @@ Runtime::~Runtime() { Thread* self = Thread::Current(); const bool attach_shutdown_thread = self == nullptr; if (attach_shutdown_thread) { + ATRACE_BEGIN("Attach shutdown thread"); CHECK(AttachCurrentThread("Shutdown thread", false, nullptr, false)); + ATRACE_END(); self = Thread::Current(); } else { LOG(WARNING) << "Current thread not detached in Runtime shutdown"; } { + ATRACE_BEGIN("Wait for shutdown cond"); MutexLock mu(self, *Locks::runtime_shutdown_lock_); shutting_down_started_ = true; while (threads_being_born_ > 0) { shutdown_cond_->Wait(self); } shutting_down_ = true; + ATRACE_END(); } // Shutdown and wait for the daemons. CHECK(self != nullptr); if (IsFinishedStarting()) { + ATRACE_BEGIN("Waiting for Daemons"); self->ClearException(); self->GetJniEnv()->CallStaticVoidMethod(WellKnownClasses::java_lang_Daemons, WellKnownClasses::java_lang_Daemons_stop); + ATRACE_END(); } Trace::Shutdown(); if (attach_shutdown_thread) { + ATRACE_BEGIN("Detach shutdown thread"); DetachCurrentThread(); + ATRACE_END(); self = nullptr; } // Make sure to let the GC complete if it is running. heap_->WaitForGcToComplete(gc::kGcCauseBackground, self); heap_->DeleteThreadPool(); - if (jit_.get() != nullptr) { + if (jit_ != nullptr) { + ATRACE_BEGIN("Delete jit"); VLOG(jit) << "Deleting jit thread pool"; // Delete thread pool before the thread list since we don't want to wait forever on the // JIT compiler threads. jit_->DeleteThreadPool(); // Similarly, stop the profile saver thread before deleting the thread list. jit_->StopProfileSaver(); + ATRACE_END(); } // Make sure our internal threads are dead before we start tearing down things they're using. @@ -274,11 +285,13 @@ Runtime::~Runtime() { delete signal_catcher_; // Make sure all other non-daemon threads have terminated, and all daemon threads are suspended. + ATRACE_BEGIN("Delete thread list"); delete thread_list_; + ATRACE_END(); // Delete the JIT after thread list to ensure that there is no remaining threads which could be // accessing the instrumentation when we delete it. - if (jit_.get() != nullptr) { + if (jit_ != nullptr) { VLOG(jit) << "Deleting jit"; jit_.reset(nullptr); } @@ -286,6 +299,7 @@ Runtime::~Runtime() { // Shutdown the fault manager if it was initialized. fault_manager.Shutdown(); + ATRACE_BEGIN("Delete state"); delete monitor_list_; delete monitor_pool_; delete class_linker_; @@ -302,10 +316,12 @@ Runtime::~Runtime() { low_4gb_arena_pool_.reset(); arena_pool_.reset(); MemMap::Shutdown(); + ATRACE_END(); // TODO: acquire a static mutex on Runtime to avoid racing. 
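Runtime::~Runtime() above now brackets each shutdown phase with ATRACE_BEGIN/ATRACE_END so systrace can attribute teardown time to a specific step. The macros must balance on every control-flow path; a hedged way to make that automatic is an RAII wrapper (ScopedTraceSketch is illustrative, not an API introduced by this change):

    #define ATRACE_TAG ATRACE_TAG_DALVIK  // assumption: runtime code traces under the Dalvik tag
    #include <cutils/trace.h>

    class ScopedTraceSketch {
     public:
      explicit ScopedTraceSketch(const char* name) { ATRACE_BEGIN(name); }
      ~ScopedTraceSketch() { ATRACE_END(); }
    };

    void DeleteStatePhase() {
      ScopedTraceSketch trace("Delete state");
      // ... deletions happen here; ATRACE_END fires on every exit path ...
    }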
CHECK(instance_ == nullptr || instance_ == this); instance_ = nullptr; + ATRACE_END(); } struct AbortState { @@ -543,12 +559,9 @@ bool Runtime::Start() { // Use !IsAotCompiler so that we get test coverage, tests are never the zygote. if (!IsAotCompiler()) { ScopedObjectAccess soa(self); - std::vector<gc::space::ImageSpace*> image_spaces = heap_->GetBootImageSpaces(); - for (gc::space::ImageSpace* image_space : image_spaces) { - ATRACE_BEGIN("AddImageStringsToTable"); - GetInternTable()->AddImageStringsToTable(image_space); - ATRACE_END(); - } + ATRACE_BEGIN("AddImageStringsToTable"); + GetInternTable()->AddImagesStringsToTable(heap_->GetBootImageSpaces()); + ATRACE_END(); ATRACE_BEGIN("MoveImageClassesToClassTable"); GetClassLinker()->AddBootImageClassesToClassTable(); ATRACE_END(); @@ -1089,13 +1102,11 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { LOG(ERROR) << "Could not initialize from image: " << error_msg; return false; } - /* TODO: Modify check to support multiple image spaces and reenable. b/26317072 if (kIsDebugBuild) { for (auto image_space : GetHeap()->GetBootImageSpaces()) { image_space->VerifyImageAllocations(); } } - */ if (boot_class_path_string_.empty()) { // The bootclasspath is not explicitly specified: construct it from the loaded dex files. const std::vector<const DexFile*>& boot_class_path = GetClassLinker()->GetBootClassPath(); @@ -1685,13 +1696,29 @@ void Runtime::SetCalleeSaveMethod(ArtMethod* method, CalleeSaveType type) { void Runtime::RegisterAppInfo(const std::vector<std::string>& code_paths, const std::string& profile_output_filename) { - VLOG(profiler) << "Register app with " << profile_output_filename_ + if (jit_.get() == nullptr) { + // We are not JITing. Nothing to do. + return; + } + + VLOG(profiler) << "Register app with " << profile_output_filename + " " << Join(code_paths, ':'); - DCHECK(!profile_output_filename.empty()); - profile_output_filename_ = profile_output_filename; - if (jit_.get() != nullptr && !profile_output_filename.empty() && !code_paths.empty()) { - jit_->StartProfileSaver(profile_output_filename, code_paths); + + if (profile_output_filename.empty()) { + LOG(WARNING) << "JIT profile information will not be recorded: profile filename is empty."; + return; } + if (!FileExists(profile_output_filename)) { + LOG(WARNING) << "JIT profile information will not be recorded: profile file does not exist."; + return; + } + if (code_paths.empty()) { + LOG(WARNING) << "JIT profile information will not be recorded: the list of code paths is empty."; + return; + } + + profile_output_filename_ = profile_output_filename; + jit_->StartProfileSaver(profile_output_filename, code_paths); } // Transaction support. diff --git a/runtime/thread_list.cc b/runtime/thread_list.cc index a390908635..ae18819e90 100644 --- a/runtime/thread_list.cc +++ b/runtime/thread_list.cc @@ -69,6 +69,7 @@ ThreadList::ThreadList() } ThreadList::~ThreadList() { + ATRACE_BEGIN(__FUNCTION__); // Detach the current thread if necessary. If we failed to start, there might not be any threads. // We need to detach the current thread here in case there's another thread waiting to join with // us. 
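The thread_list.cc hunks below replace the old fixed wait in daemon suspension, ten iterations of a 200 ms sleep, with a 2-second budget polled at 1 ms granularity, so a prompt suspension no longer costs a mandatory 200 ms. The shape of that loop, sketched with a hypothetical predicate standing in for the thread-state scan:

    #include <unistd.h>

    #include <cstddef>

    // Hypothetical stand-in for scanning list_ for daemons still in kRunnable.
    bool AllDaemonsSuspended() { return true; }

    bool WaitForDaemonSuspension() {
      static constexpr size_t kTimeoutMicroseconds = 2000 * 1000;
      static constexpr size_t kSleepMicroseconds = 1000;
      for (size_t i = 0; i < kTimeoutMicroseconds / kSleepMicroseconds; ++i) {
        if (AllDaemonsSuspended()) {
          return true;  // fast path: typically exits within a few milliseconds
        }
        usleep(kSleepMicroseconds);
      }
      return false;  // caller logs "timed out suspending all daemon threads"
    }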
@@ -79,19 +80,27 @@ ThreadList::~ThreadList() { contains = Contains(self); } if (contains) { + ATRACE_BEGIN("DetachCurrentThread"); Runtime::Current()->DetachCurrentThread(); + ATRACE_END(); } + ATRACE_BEGIN("WaitForOtherNonDaemonThreadsToExit"); WaitForOtherNonDaemonThreadsToExit(); + ATRACE_END(); // Disable GC and wait for GC to complete in case there are still daemon threads doing // allocations. gc::Heap* const heap = Runtime::Current()->GetHeap(); heap->DisableGCForShutdown(); // In case a GC is in progress, wait for it to finish. + ATRACE_BEGIN("WaitForGcToComplete"); heap->WaitForGcToComplete(gc::kGcCauseBackground, Thread::Current()); - + ATRACE_END(); // TODO: there's an unaddressed race here where a thread may attach during shutdown, see // Thread::Init. - SuspendAllDaemonThreads(); + ATRACE_BEGIN("SuspendAllDaemonThreadsForShutdown"); + SuspendAllDaemonThreadsForShutdown(); + ATRACE_END(); + ATRACE_END(); } bool ThreadList::Contains(Thread* thread) { @@ -1133,7 +1142,7 @@ void ThreadList::WaitForOtherNonDaemonThreadsToExit() { } } -void ThreadList::SuspendAllDaemonThreads() { +void ThreadList::SuspendAllDaemonThreadsForShutdown() { Thread* self = Thread::Current(); MutexLock mu(self, *Locks::thread_list_lock_); { // Tell all the daemons it's time to suspend. @@ -1145,12 +1154,16 @@ void ThreadList::SuspendAllDaemonThreads() { if (thread != self) { thread->ModifySuspendCount(self, +1, nullptr, false); } + // We are shutting down the runtime, set the JNI functions of all the JNIEnvs to be + // the sleep forever one. + thread->GetJniEnv()->SetFunctionsToRuntimeShutdownFunctions(); } } // Give the threads a chance to suspend, complaining if they're slow. bool have_complained = false; - for (int i = 0; i < 10; ++i) { - usleep(200 * 1000); + static constexpr size_t kTimeoutMicroseconds = 2000 * 1000; + static constexpr size_t kSleepMicroseconds = 1000; + for (size_t i = 0; i < kTimeoutMicroseconds / kSleepMicroseconds; ++i) { bool all_suspended = true; for (const auto& thread : list_) { if (thread != self && thread->GetState() == kRunnable) { @@ -1164,8 +1177,9 @@ void ThreadList::SuspendAllDaemonThreads() { if (all_suspended) { return; } + usleep(kSleepMicroseconds); } - LOG(ERROR) << "suspend all daemons failed"; + LOG(WARNING) << "timed out suspending all daemon threads"; } void ThreadList::Register(Thread* self) { DCHECK_EQ(self, Thread::Current()); diff --git a/runtime/thread_list.h b/runtime/thread_list.h index 07ea10dbea..2e73f6af7f 100644 --- a/runtime/thread_list.h +++ b/runtime/thread_list.h @@ -164,7 +164,7 @@ class ThreadList { void DumpUnattachedThreads(std::ostream& os) REQUIRES(!Locks::thread_list_lock_); - void SuspendAllDaemonThreads() + void SuspendAllDaemonThreadsForShutdown() REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); void WaitForOtherNonDaemonThreadsToExit() REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_); diff --git a/runtime/utils.cc b/runtime/utils.cc index ff6b4c0d20..8e9f12b7a0 100644 --- a/runtime/utils.cc +++ b/runtime/utils.cc @@ -1446,6 +1446,11 @@ bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) { return true; } +bool FileExists(const std::string& filename) { + struct stat buffer; + return stat(filename.c_str(), &buffer) == 0; +} + std::string PrettyDescriptor(Primitive::Type type) { return PrettyDescriptor(Primitive::Descriptor(type)); } @@ -1866,4 +1871,10 @@ int64_t GetFileSizeBytes(const std::string& filename) { return rc == 0 ? 
stat_buf.st_size : -1; } +void SleepForever() { + while (true) { + usleep(1000000); + } +} + } // namespace art diff --git a/runtime/utils.h b/runtime/utils.h index 3f94a80a68..153749eff4 100644 --- a/runtime/utils.h +++ b/runtime/utils.h @@ -288,6 +288,9 @@ std::string GetSystemImageFilename(const char* location, InstructionSet isa); // Wrapper on fork/execv to run a command in a subprocess. bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg); +// Returns true if the file exists. +bool FileExists(const std::string& filename); + class VoidFunctor { public: template <typename A> @@ -382,6 +385,9 @@ T GetRandomNumber(T min, T max) { // Return the file size in bytes or -1 if the file does not exists. int64_t GetFileSizeBytes(const std::string& filename); +// Sleep forever and never come back. +NO_RETURN void SleepForever(); + } // namespace art #endif // ART_RUNTIME_UTILS_H_ diff --git a/test/004-ThreadStress/src/Main.java b/test/004-ThreadStress/src/Main.java index 9461c0b967..b9a46deba8 100644 --- a/test/004-ThreadStress/src/Main.java +++ b/test/004-ThreadStress/src/Main.java @@ -57,12 +57,14 @@ public class Main implements Runnable { } private final static class OOM extends Operation { + private final static int ALLOC_SIZE = 1024; + @Override public boolean perform() { try { List<byte[]> l = new ArrayList<byte[]>(); while (true) { - l.add(new byte[1024]); + l.add(new byte[ALLOC_SIZE]); } } catch (OutOfMemoryError e) { } @@ -115,12 +117,33 @@ public class Main implements Runnable { } private final static class Alloc extends Operation { + private final static int ALLOC_SIZE = 1024; // Needs to be small enough to not be in LOS. + private final static int ALLOC_COUNT = 1024; + + @Override + public boolean perform() { + try { + List<byte[]> l = new ArrayList<byte[]>(); + for (int i = 0; i < ALLOC_COUNT; i++) { + l.add(new byte[ALLOC_SIZE]); + } + } catch (OutOfMemoryError e) { + } + return true; + } + } + + private final static class LargeAlloc extends Operation { + private final static int PAGE_SIZE = 4096; + private final static int PAGE_SIZE_MODIFIER = 10; // Needs to be large enough for LOS. 
+ private final static int ALLOC_COUNT = 100; + @Override public boolean perform() { try { List<byte[]> l = new ArrayList<byte[]>(); - for (int i = 0; i < 1024; i++) { - l.add(new byte[1024]); + for (int i = 0; i < ALLOC_COUNT; i++) { + l.add(new byte[PAGE_SIZE_MODIFIER * PAGE_SIZE]); } } catch (OutOfMemoryError e) { } @@ -144,10 +167,12 @@ public class Main implements Runnable { } private final static class Sleep extends Operation { + private final static int SLEEP_TIME = 100; + @Override public boolean perform() { try { - Thread.sleep(100); + Thread.sleep(SLEEP_TIME); } catch (InterruptedException ignored) { } return true; @@ -155,6 +180,8 @@ public class Main implements Runnable { } private final static class TimedWait extends Operation { + private final static int SLEEP_TIME = 100; + private final Object lock; public TimedWait(Object lock) { @@ -165,7 +192,7 @@ public class Main implements Runnable { public boolean perform() { synchronized (lock) { try { - lock.wait(100, 0); + lock.wait(SLEEP_TIME, 0); } catch (InterruptedException ignored) { } } @@ -215,7 +242,8 @@ public class Main implements Runnable { Map<Operation, Double> frequencyMap = new HashMap<Operation, Double>(); frequencyMap.put(new OOM(), 0.005); // 1/200 frequencyMap.put(new SigQuit(), 0.095); // 19/200 - frequencyMap.put(new Alloc(), 0.3); // 60/200 + frequencyMap.put(new Alloc(), 0.25); // 50/200 + frequencyMap.put(new LargeAlloc(), 0.05); // 10/200 frequencyMap.put(new StackTrace(), 0.1); // 20/200 frequencyMap.put(new Exit(), 0.25); // 50/200 frequencyMap.put(new Sleep(), 0.125); // 25/200 @@ -261,6 +289,8 @@ public class Main implements Runnable { op = new SigQuit(); } else if (split[0].equals("-alloc")) { op = new Alloc(); + } else if (split[0].equals("-largealloc")) { + op = new LargeAlloc(); } else if (split[0].equals("-stacktrace")) { op = new StackTrace(); } else if (split[0].equals("-exit")) { diff --git a/test/118-noimage-dex2oat/run b/test/118-noimage-dex2oat/run index 4b1d0cea59..07bdb08785 100644 --- a/test/118-noimage-dex2oat/run +++ b/test/118-noimage-dex2oat/run @@ -41,7 +41,6 @@ fi bpath="${framework}/core-libart${bpath_suffix}.jar" bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar" bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar" -bpath="${bpath}:${framework}/core-junit${bpath_suffix}.jar" bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar" bpath_arg="--runtime-option -Xbootclasspath:${bpath}" diff --git a/test/143-string-value/check b/test/143-string-value/check new file mode 100755 index 0000000000..cdf7b783a3 --- /dev/null +++ b/test/143-string-value/check @@ -0,0 +1,20 @@ +#!/bin/bash +# +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +# Strip run-specific numbers (pid and line number) +sed -e 's/^art E[ ]\+[0-9]\+[ ]\+[0-9]\+ art\/runtime\/native\/java_lang_Class.cc:[0-9]\+\] //' "$2" > "$2.tmp" + +diff --strip-trailing-cr -q "$1" "$2.tmp" >/dev/null diff --git a/test/143-string-value/expected.txt b/test/143-string-value/expected.txt new file mode 100644 index 0000000000..06cdb89e90 --- /dev/null +++ b/test/143-string-value/expected.txt @@ -0,0 +1 @@ +The String#value field is not present on Android versions >= 6.0 diff --git a/test/143-string-value/info.txt b/test/143-string-value/info.txt new file mode 100644 index 0000000000..61ec816ab8 --- /dev/null +++ b/test/143-string-value/info.txt @@ -0,0 +1,2 @@ +Test to ensure we emit an error message when being asked +for String#value. diff --git a/test/143-string-value/src/Main.java b/test/143-string-value/src/Main.java new file mode 100644 index 0000000000..e97069200a --- /dev/null +++ b/test/143-string-value/src/Main.java @@ -0,0 +1,26 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + public static void main(String[] args) { + try { + String.class.getDeclaredField("value"); + throw new Error("Expected to fail"); + } catch (ReflectiveOperationException e) { + // Ignore... 
+ } + } +} diff --git a/test/442-checker-constant-folding/src/Main.java b/test/442-checker-constant-folding/src/Main.java index 43bc9d06a2..0e07f47288 100644 --- a/test/442-checker-constant-folding/src/Main.java +++ b/test/442-checker-constant-folding/src/Main.java @@ -120,9 +120,10 @@ public class Main { /// CHECK-DAG: <<Const2:i\d+>> IntConstant 2 /// CHECK-DAG: <<Const5:i\d+>> IntConstant 5 /// CHECK-DAG: <<Const6:i\d+>> IntConstant 6 + /// CHECK-DAG: <<Const11:i\d+>> IntConstant 11 /// CHECK-DAG: <<Add1:i\d+>> Add [<<Const1>>,<<Const2>>] - /// CHECK-DAG: <<Add2:i\d+>> Add [<<Const5>>,<<Const6>>] - /// CHECK-DAG: <<Add3:i\d+>> Add [<<Add1>>,<<Add2>>] + /// CHECK-DAG: Add [<<Const5>>,<<Const6>>] + /// CHECK-DAG: <<Add3:i\d+>> Add [<<Add1>>,<<Const11>>] /// CHECK-DAG: Return [<<Add3>>] /// CHECK-START: int Main.IntAddition2() constant_folding (after) @@ -522,7 +523,7 @@ public class Main { /// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10 /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3 /// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>] - /// CHECK-DAG: <<And:j\d+>> And [<<Const10L>>,<<TypeConv>>] + /// CHECK-DAG: <<And:j\d+>> And [<<TypeConv>>,<<Const10L>>] /// CHECK-DAG: Return [<<And>>] /// CHECK-START: long Main.AndLongInt() constant_folding (after) @@ -567,7 +568,7 @@ public class Main { /// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10 /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3 /// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>] - /// CHECK-DAG: <<Or:j\d+>> Or [<<Const10L>>,<<TypeConv>>] + /// CHECK-DAG: <<Or:j\d+>> Or [<<TypeConv>>,<<Const10L>>] /// CHECK-DAG: Return [<<Or>>] /// CHECK-START: long Main.OrLongInt() constant_folding (after) @@ -612,7 +613,7 @@ public class Main { /// CHECK-DAG: <<Const10L:j\d+>> LongConstant 10 /// CHECK-DAG: <<Const3:i\d+>> IntConstant 3 /// CHECK-DAG: <<TypeConv:j\d+>> TypeConversion [<<Const3>>] - /// CHECK-DAG: <<Xor:j\d+>> Xor [<<Const10L>>,<<TypeConv>>] + /// CHECK-DAG: <<Xor:j\d+>> Xor [<<TypeConv>>,<<Const10L>>] /// CHECK-DAG: Return [<<Xor>>] /// CHECK-START: long Main.XorLongInt() constant_folding (after) @@ -749,7 +750,7 @@ public class Main { /// CHECK-START: long Main.Mul0(long) constant_folding (before) /// CHECK-DAG: <<Arg:j\d+>> ParameterValue /// CHECK-DAG: <<Const0:j\d+>> LongConstant 0 - /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Arg>>,<<Const0>>] + /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Const0>>,<<Arg>>] /// CHECK-DAG: Return [<<Mul>>] /// CHECK-START: long Main.Mul0(long) constant_folding (after) diff --git a/test/449-checker-bce/src/Main.java b/test/449-checker-bce/src/Main.java index 6e7ba406e0..3e6d1f4d94 100644 --- a/test/449-checker-bce/src/Main.java +++ b/test/449-checker-bce/src/Main.java @@ -127,7 +127,7 @@ public class Main { } - /// CHECK-START: void Main.constantIndexing2(int[]) BCE (before) + /// CHECK-START: void Main.$opt$noinline$constantIndexing2(int[]) BCE (before) /// CHECK: BoundsCheck /// CHECK: ArraySet /// CHECK: BoundsCheck @@ -137,7 +137,7 @@ public class Main { /// CHECK: BoundsCheck /// CHECK: ArraySet - /// CHECK-START: void Main.constantIndexing2(int[]) BCE (after) + /// CHECK-START: void Main.$opt$noinline$constantIndexing2(int[]) BCE (after) /// CHECK: LessThanOrEqual /// CHECK: Deoptimize /// CHECK-NOT: BoundsCheck @@ -151,12 +151,15 @@ public class Main { /// CHECK: BoundsCheck /// CHECK: ArraySet - static void constantIndexing2(int[] array) { + static void $opt$noinline$constantIndexing2(int[] array) { array[1] = 1; array[2] = 1; array[3] = 1; array[4] = 1; array[-1] = 1; + if (array[1] == 1) { 
+ throw new Error(""); + } } @@ -655,10 +658,10 @@ public class Main { try { assertIsManaged(); // This will cause AIOOBE. - constantIndexing2(new int[3]); + $opt$noinline$constantIndexing2(new int[3]); } catch (ArrayIndexOutOfBoundsException e) { assertIsManaged(); // This is to ensure that single-frame deoptimization works. - // Will need to be updated if constantIndexing2 is inlined. + // Will need to be updated if $opt$noinline$constantIndexing2 is inlined. try { // This will cause AIOOBE. constantIndexingForward6(new int[3]); diff --git a/test/450-checker-types/src/Main.java b/test/450-checker-types/src/Main.java index fd4dd5ecbf..92cf807c2b 100644 --- a/test/450-checker-types/src/Main.java +++ b/test/450-checker-types/src/Main.java @@ -722,22 +722,6 @@ public class Main { } } - /// CHECK-START: void Main.testLoopPhisWithNullAndCrossUses(boolean) ssa_builder (after) - /// CHECK-DAG: <<Null:l\d+>> NullConstant - /// CHECK-DAG: <<PhiA:l\d+>> Phi [<<Null>>,<<PhiB:l\d+>>,<<PhiA>>] klass:java.lang.Object exact:false - /// CHECK-DAG: <<PhiB>> Phi [<<Null>>,<<PhiB>>,<<PhiA>>] klass:java.lang.Object exact:false - private void testLoopPhisWithNullAndCrossUses(boolean cond) { - Main a = null; - Main b = null; - while (a == null) { - if (cond) { - a = b; - } else { - b = a; - } - } - } - /// CHECK-START: java.lang.Object[] Main.testInstructionsWithUntypedParent() ssa_builder (after) /// CHECK-DAG: <<Null:l\d+>> NullConstant /// CHECK-DAG: <<LoopPhi:l\d+>> Phi [<<Null>>,<<Phi:l\d+>>] klass:java.lang.Object[] exact:true diff --git a/test/458-checker-instruction-simplification/src/Main.java b/test/458-checker-instruction-simplification/src/Main.java index 6151fc10f2..0fd7801d48 100644 --- a/test/458-checker-instruction-simplification/src/Main.java +++ b/test/458-checker-instruction-simplification/src/Main.java @@ -288,7 +288,7 @@ public class Main { /// CHECK-START: long Main.Mul1(long) instruction_simplifier (before) /// CHECK-DAG: <<Arg:j\d+>> ParameterValue /// CHECK-DAG: <<Const1:j\d+>> LongConstant 1 - /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Arg>>,<<Const1>>] + /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Const1>>,<<Arg>>] /// CHECK-DAG: Return [<<Mul>>] /// CHECK-START: long Main.Mul1(long) instruction_simplifier (after) @@ -323,7 +323,7 @@ public class Main { /// CHECK-START: long Main.MulPowerOfTwo128(long) instruction_simplifier (before) /// CHECK-DAG: <<Arg:j\d+>> ParameterValue /// CHECK-DAG: <<Const128:j\d+>> LongConstant 128 - /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Arg>>,<<Const128>>] + /// CHECK-DAG: <<Mul:j\d+>> Mul [<<Const128>>,<<Arg>>] /// CHECK-DAG: Return [<<Mul>>] /// CHECK-START: long Main.MulPowerOfTwo128(long) instruction_simplifier (after) @@ -705,7 +705,7 @@ public class Main { /// CHECK-DAG: <<Arg:i\d+>> ParameterValue /// CHECK-DAG: <<Neg1:i\d+>> Neg [<<Arg>>] /// CHECK-DAG: <<Neg2:i\d+>> Neg [<<Neg1>>] - /// CHECK-DAG: <<Add:i\d+>> Add [<<Neg1>>,<<Neg2>>] + /// CHECK-DAG: <<Add:i\d+>> Add [<<Neg2>>,<<Neg1>>] /// CHECK-DAG: Return [<<Add>>] /// CHECK-START: int Main.NegNeg2(int) instruction_simplifier (after) @@ -841,13 +841,13 @@ public class Main { /// CHECK-DAG: <<ConstF1:i\d+>> IntConstant -1 /// CHECK-DAG: <<Xor1:i\d+>> Xor [<<Arg>>,<<ConstF1>>] /// CHECK-DAG: <<Xor2:i\d+>> Xor [<<Xor1>>,<<ConstF1>>] - /// CHECK-DAG: <<Add:i\d+>> Add [<<Xor1>>,<<Xor2>>] + /// CHECK-DAG: <<Add:i\d+>> Add [<<Xor2>>,<<Xor1>>] /// CHECK-DAG: Return [<<Add>>] /// CHECK-START: int Main.NotNot2(int) instruction_simplifier (after) /// CHECK-DAG: <<Arg:i\d+>> ParameterValue /// CHECK-DAG: <<Not:i\d+>> Not 
[<<Arg>>] - /// CHECK-DAG: <<Add:i\d+>> Add [<<Not>>,<<Arg>>] + /// CHECK-DAG: <<Add:i\d+>> Add [<<Arg>>,<<Not>>] /// CHECK-DAG: Return [<<Add>>] /// CHECK-START: int Main.NotNot2(int) instruction_simplifier (after) @@ -1005,7 +1005,7 @@ public class Main { /// CHECK-START: int Main.EqualFalseLhs(boolean) instruction_simplifier (before) /// CHECK-DAG: <<Arg:z\d+>> ParameterValue /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 - /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Const0>>,<<Arg>>] + /// CHECK-DAG: <<Cond:z\d+>> Equal [<<Arg>>,<<Const0>>] /// CHECK-DAG: If [<<Cond>>] /// CHECK-START: int Main.EqualFalseLhs(boolean) instruction_simplifier (after) @@ -1064,7 +1064,7 @@ public class Main { /// CHECK-START: int Main.NotEqualFalseLhs(boolean) instruction_simplifier (before) /// CHECK-DAG: <<Arg:z\d+>> ParameterValue /// CHECK-DAG: <<Const0:i\d+>> IntConstant 0 - /// CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Const0>>,<<Arg>>] + /// CHECK-DAG: <<Cond:z\d+>> NotEqual [<<Arg>>,<<Const0>>] /// CHECK-DAG: If [<<Cond>>] /// CHECK-START: int Main.NotEqualFalseLhs(boolean) instruction_simplifier (after) @@ -1234,7 +1234,7 @@ public class Main { /// CHECK-START: long Main.mulPow2Minus1(long) instruction_simplifier (before) /// CHECK-DAG: <<Arg:j\d+>> ParameterValue /// CHECK-DAG: <<Const31:j\d+>> LongConstant 31 - /// CHECK: Mul [<<Arg>>,<<Const31>>] + /// CHECK: Mul [<<Const31>>,<<Arg>>] /// CHECK-START: long Main.mulPow2Minus1(long) instruction_simplifier (after) /// CHECK-DAG: <<Arg:j\d+>> ParameterValue diff --git a/test/466-get-live-vreg/get_live_vreg_jni.cc b/test/466-get-live-vreg/get_live_vreg_jni.cc index 375a3fc824..4f89e9134b 100644 --- a/test/466-get-live-vreg/get_live_vreg_jni.cc +++ b/test/466-get-live-vreg/get_live_vreg_jni.cc @@ -40,15 +40,17 @@ class TestVisitor : public StackVisitor { uint32_t value = 0; CHECK(GetVReg(m, 0, kIntVReg, &value)); CHECK_EQ(value, 42u); - } else if (m_name.compare("testIntervalHole") == 0) { + } else if (m_name.compare("$opt$noinline$testIntervalHole") == 0) { + uint32_t number_of_dex_registers = m->GetCodeItem()->registers_size_; + uint32_t dex_register_of_first_parameter = number_of_dex_registers - 2; found_method_ = true; uint32_t value = 0; if (GetCurrentQuickFrame() != nullptr && GetCurrentOatQuickMethodHeader()->IsOptimized() && !Runtime::Current()->IsDebuggable()) { - CHECK_EQ(GetVReg(m, 0, kIntVReg, &value), false); + CHECK_EQ(GetVReg(m, dex_register_of_first_parameter, kIntVReg, &value), false); } else { - CHECK(GetVReg(m, 0, kIntVReg, &value)); + CHECK(GetVReg(m, dex_register_of_first_parameter, kIntVReg, &value)); CHECK_EQ(value, 1u); } } diff --git a/test/466-get-live-vreg/src/Main.java b/test/466-get-live-vreg/src/Main.java index d036a24459..19032601fa 100644 --- a/test/466-get-live-vreg/src/Main.java +++ b/test/466-get-live-vreg/src/Main.java @@ -31,7 +31,7 @@ public class Main { } } - static void testIntervalHole(int arg, boolean test) { + static void $opt$noinline$testIntervalHole(int arg, boolean test) { // Move the argument to callee save to ensure it is in // a readable register. moveArgToCalleeSave(); @@ -44,6 +44,9 @@ public class Main { // The environment use of `arg` should not make it live. 
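      // (In optimized code this creates a hole in the register interval of       // `arg` across the call; the GetVReg checks in get_live_vreg_jni.cc       // depend on that hole.)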
doStaticNativeCallLiveVreg(); } + if (staticField1 == 2) { + throw new Error(""); + } } static native void doStaticNativeCallLiveVreg(); @@ -67,7 +70,7 @@ public class Main { static void testWrapperIntervalHole(int arg, boolean test) { try { Thread.sleep(0); - testIntervalHole(arg, test); + $opt$noinline$testIntervalHole(arg, test); } catch (Exception e) { throw new Error(e); } diff --git a/test/476-checker-ctor-memory-barrier/src/Main.java b/test/476-checker-ctor-memory-barrier/src/Main.java index 41bec057ee..c2a2a100fb 100644 --- a/test/476-checker-ctor-memory-barrier/src/Main.java +++ b/test/476-checker-ctor-memory-barrier/src/Main.java @@ -25,13 +25,14 @@ class ClassWithoutFinals { class ClassWithFinals { public final int x; public ClassWithFinals obj; + public static boolean doThrow = false; /// CHECK-START: void ClassWithFinals.<init>(boolean) register (after) /// CHECK: MemoryBarrier kind:StoreStore /// CHECK-NEXT: ReturnVoid public ClassWithFinals(boolean cond) { x = 0; - if (cond) { + if (doThrow) { // avoid inlining throw new RuntimeException(); } diff --git a/test/482-checker-loop-back-edge-use/src/Main.java b/test/482-checker-loop-back-edge-use/src/Main.java index 6b4da9de27..d0b33b9282 100644 --- a/test/482-checker-loop-back-edge-use/src/Main.java +++ b/test/482-checker-loop-back-edge-use/src/Main.java @@ -163,8 +163,8 @@ public class Main { /// CHECK: <<Arg:z\d+>> StaticFieldGet liveness:<<ArgLiv:\d+>> ranges:{[<<ArgLiv>>,<<ArgLoopUse:\d+>>)} uses:[<<ArgUse:\d+>>,<<ArgLoopUse>>] /// CHECK: If [<<Arg>>] liveness:<<IfLiv:\d+>> /// CHECK: Goto liveness:<<GotoLiv1:\d+>> - /// CHECK: Goto liveness:<<GotoLiv2:\d+>> /// CHECK: Exit + /// CHECK: Goto liveness:<<GotoLiv2:\d+>> /// CHECK-EVAL: <<IfLiv>> + 1 == <<ArgUse>> /// CHECK-EVAL: <<GotoLiv1>> < <<GotoLiv2>> /// CHECK-EVAL: <<GotoLiv1>> + 2 == <<ArgLoopUse>> diff --git a/test/529-checker-unresolved/expected.txt b/test/529-checker-unresolved/expected.txt index 1e7dbfed2e..1590a2a280 100644 --- a/test/529-checker-unresolved/expected.txt +++ b/test/529-checker-unresolved/expected.txt @@ -5,3 +5,6 @@ UnresolvedClass.interfaceMethod() UnresolvedClass.superMethod() instanceof ok checkcast ok +UnresolvedClass.directCall() +UnresolvedClass.directCall() +UnresolvedClass.directCall() diff --git a/test/529-checker-unresolved/src/Main.java b/test/529-checker-unresolved/src/Main.java index 5219c04c37..872fa6d0dd 100644 --- a/test/529-checker-unresolved/src/Main.java +++ b/test/529-checker-unresolved/src/Main.java @@ -138,6 +138,27 @@ public class Main extends UnresolvedSuperClass { callUnresolvedInstanceFieldAccess(c); testInstanceOf(m); testCheckCast(m); + testLicm(2); + } + + /// CHECK-START: void Main.testLicm(int) licm (before) + /// CHECK: <<Class:l\d+>> LoadClass loop:B2 + /// CHECK-NEXT: <<Clinit:l\d+>> ClinitCheck [<<Class>>] loop:B2 + /// CHECK-NEXT: <<New:l\d+>> NewInstance [<<Clinit>>,<<Method:[i|j]\d+>>] loop:B2 + /// CHECK-NEXT: InvokeUnresolved [<<New>>] loop:B2 + + /// CHECK-START: void Main.testLicm(int) licm (after) + /// CHECK: <<Class:l\d+>> LoadClass loop:none + /// CHECK-NEXT: <<Clinit:l\d+>> ClinitCheck [<<Class>>] loop:none + /// CHECK: <<New:l\d+>> NewInstance [<<Clinit>>,<<Method:[i|j]\d+>>] loop:B2 + /// CHECK-NEXT: InvokeUnresolved [<<New>>] loop:B2 + static public void testLicm(int count) { + // Test to make sure we keep the initialization check after loading an unresolved class. 
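+    // LICM may hoist the LoadClass and ClinitCheck out of the loop, but the +    // NewInstance and InvokeUnresolved must stay inside it, as the CHECK-START +    // blocks above assert.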
+ UnresolvedClass c; + int i = 0; + do { + c = new UnresolvedClass(); + } while (i++ != count); } public static void expectEquals(byte expected, byte result) { diff --git a/test/552-checker-primitive-typeprop/smali/ArraySet.smali b/test/552-checker-primitive-typeprop/smali/ArraySet.smali new file mode 100644 index 0000000000..57d8606abb --- /dev/null +++ b/test/552-checker-primitive-typeprop/smali/ArraySet.smali @@ -0,0 +1,51 @@ +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LArraySet; +.super Ljava/lang/Object; + +# Test ArraySet on int[] and float[] arrays. The input should be typed accordingly. +# Note that the input is a Phi to make sure primitive type propagation is re-run +# on the replaced inputs. + +## CHECK-START: void ArraySet.ambiguousSet(int[], float[], boolean) ssa_builder (after) +## CHECK-DAG: <<IntArray:l\d+>> ParameterValue klass:int[] +## CHECK-DAG: <<IntA:i\d+>> IntConstant 0 +## CHECK-DAG: <<IntB:i\d+>> IntConstant 1073741824 +## CHECK-DAG: <<IntPhi:i\d+>> Phi [<<IntA>>,<<IntB>>] reg:0 +## CHECK-DAG: <<IntNC:l\d+>> NullCheck [<<IntArray>>] +## CHECK-DAG: ArraySet [<<IntNC>>,{{i\d+}},<<IntPhi>>] + +## CHECK-DAG: <<FloatArray:l\d+>> ParameterValue klass:float[] +## CHECK-DAG: <<FloatA:f\d+>> FloatConstant 0 +## CHECK-DAG: <<FloatB:f\d+>> FloatConstant 2 +## CHECK-DAG: <<FloatPhi:f\d+>> Phi [<<FloatA>>,<<FloatB>>] reg:0 +## CHECK-DAG: <<FloatNC:l\d+>> NullCheck [<<FloatArray>>] +## CHECK-DAG: ArraySet [<<FloatNC>>,{{i\d+}},<<FloatPhi>>] + +.method public static ambiguousSet([I[FZ)V + .registers 8 + + const v0, 0x0 + if-eqz p2, :else + const v0, 0x40000000 + :else + # v0 = Phi [0.0f, 2.0f] + + const v1, 0x1 + aput v0, p0, v1 + aput v0, p1, v1 + + return-void +.end method diff --git a/test/554-checker-rtp-checkcast/expected.txt b/test/554-checker-rtp-checkcast/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/554-checker-rtp-checkcast/expected.txt diff --git a/test/554-checker-rtp-checkcast/info.txt b/test/554-checker-rtp-checkcast/info.txt new file mode 100644 index 0000000000..2a60971081 --- /dev/null +++ b/test/554-checker-rtp-checkcast/info.txt @@ -0,0 +1 @@ +Tests that phis with check-casted reference type inputs are typed. diff --git a/test/554-checker-rtp-checkcast/src/Main.java b/test/554-checker-rtp-checkcast/src/Main.java new file mode 100644 index 0000000000..607f71afb5 --- /dev/null +++ b/test/554-checker-rtp-checkcast/src/Main.java @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +public class Main { + + public static Object returnIntArray() { return new int[10]; } + + /// CHECK-START: void Main.boundTypeForMergingPhi() ssa_builder (after) + /// CHECK-DAG: ArraySet [<<NC:l\d+>>,{{i\d+}},{{i\d+}}] + /// CHECK-DAG: <<NC>> NullCheck [<<Phi:l\d+>>] + /// CHECK-DAG: <<Phi>> Phi klass:int[] + + public static void boundTypeForMergingPhi() { + int[] array = new int[20]; + if (array.hashCode() > 5) { + array = (int[]) returnIntArray(); + } + array[0] = 14; + } + + /// CHECK-START: void Main.boundTypeForLoopPhi() ssa_builder (after) + /// CHECK-DAG: ArraySet [<<NC:l\d+>>,{{i\d+}},{{i\d+}}] + /// CHECK-DAG: <<NC>> NullCheck [<<Phi:l\d+>>] + /// CHECK-DAG: <<Phi>> Phi klass:int[] + + public static void boundTypeForLoopPhi() { + int[] array = new int[20]; + int i = 0; + while (i < 4) { + ++i; + array[i] = i; + if (i > 2) { + array = (int[]) returnIntArray(); + } + } + array[0] = 14; + } + + /// CHECK-START: void Main.boundTypeForCatchPhi() ssa_builder (after) + /// CHECK-DAG: ArraySet [<<NC:l\d+>>,{{i\d+}},{{i\d+}}] + /// CHECK-DAG: <<NC>> NullCheck [<<Phi:l\d+>>] + /// CHECK-DAG: <<Phi>> Phi is_catch_phi:true klass:int[] + + public static void boundTypeForCatchPhi() { + int[] array1 = new int[20]; + int[] array2 = (int[]) returnIntArray(); + + int[] catch_phi = array1; + try { + System.nanoTime(); + catch_phi = array2; + System.nanoTime(); + } catch (Throwable ex) { + catch_phi[0] = 14; + } + } + + public static void main(String[] args) { } +} diff --git a/test/557-checker-ref-equivalent/expected.txt b/test/557-checker-ref-equivalent/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/557-checker-ref-equivalent/expected.txt diff --git a/test/557-checker-ref-equivalent/info.txt b/test/557-checker-ref-equivalent/info.txt new file mode 100644 index 0000000000..30e763b909 --- /dev/null +++ b/test/557-checker-ref-equivalent/info.txt @@ -0,0 +1 @@ +Checker tests to ensure we do not get reference and integer phi equivalents. diff --git a/test/557-checker-ref-equivalent/smali/TestCase.smali b/test/557-checker-ref-equivalent/smali/TestCase.smali new file mode 100644 index 0000000000..24729572c4 --- /dev/null +++ b/test/557-checker-ref-equivalent/smali/TestCase.smali @@ -0,0 +1,51 @@ +# Copyright (C) 2015 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LTestCase; + +.super Ljava/lang/Object; + +## CHECK-START: void TestCase.testIntRefEquivalent() ssa_builder (after) +## CHECK-NOT: Phi +.method public static testIntRefEquivalent()V + .registers 4 + + const v0, 0 + + :try_start + invoke-static {v0,v0}, LTestCase;->foo(ILjava/lang/Object;)V + if-eqz v0, :end_if + const v0, 0 + :end_if + invoke-static {v0,v0}, LTestCase;->foo(ILjava/lang/Object;)V + goto :no_catch + :try_end + + .catch Ljava/lang/Exception; {:try_start .. 
:try_end} :exception + :exception + # We used to have reference and integer phi equivalents here, which + # broke the invariant of not sharing the same spill slot between those two + # types. + invoke-static {v0,v0}, LTestCase;->foo(ILjava/lang/Object;)V + + :no_catch + goto :try_start + return-void + +.end method + +.method public static foo(ILjava/lang/Object;)V + .registers 4 + return-void +.end method diff --git a/test/557-checker-ref-equivalent/src/Main.java b/test/557-checker-ref-equivalent/src/Main.java new file mode 100644 index 0000000000..a970af5cdf --- /dev/null +++ b/test/557-checker-ref-equivalent/src/Main.java @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +public class Main { + + /// CHECK-START: void Main.testRedundantPhiCycle(boolean) ssa_builder (after) + /// CHECK-NOT: Phi + private void testRedundantPhiCycle(boolean cond) { + Object o = null; + while (true) { + if (cond) { + o = null; + } + System.out.println(o); + } + } + + /// CHECK-START: void Main.testLoopPhisWithNullAndCrossUses(boolean) ssa_builder (after) + /// CHECK-NOT: Phi + private void testLoopPhisWithNullAndCrossUses(boolean cond) { + Main a = null; + Main b = null; + while (a == null) { + if (cond) { + a = b; + } else { + b = a; + } + } + } + + public static void main(String[] args) { + } +} diff --git a/test/559-checker-rtp-ifnotnull/expected.txt b/test/559-checker-rtp-ifnotnull/expected.txt new file mode 100644 index 0000000000..e69de29bb2 --- /dev/null +++ b/test/559-checker-rtp-ifnotnull/expected.txt diff --git a/test/559-checker-rtp-ifnotnull/info.txt b/test/559-checker-rtp-ifnotnull/info.txt new file mode 100644 index 0000000000..c08aa0c5c2 --- /dev/null +++ b/test/559-checker-rtp-ifnotnull/info.txt @@ -0,0 +1,2 @@ +Tests that BoundType created for if-not-null does not force untyped loop phis +to Object.
\ No newline at end of file diff --git a/test/559-checker-rtp-ifnotnull/src/Main.java b/test/559-checker-rtp-ifnotnull/src/Main.java new file mode 100644 index 0000000000..8f401292da --- /dev/null +++ b/test/559-checker-rtp-ifnotnull/src/Main.java @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +public class Main { + + /// CHECK-START: void Main.boundTypeForIfNotNull() ssa_builder (after) + /// CHECK-DAG: <<Method:(i|j)\d+>> CurrentMethod + /// CHECK-DAG: <<Null:l\d+>> NullConstant + /// CHECK-DAG: <<Cst5:i\d+>> IntConstant 5 + /// CHECK-DAG: <<Cst10:i\d+>> IntConstant 10 + + /// CHECK-DAG: InvokeVirtual [<<NullCheck:l\d+>>] + /// CHECK-DAG: <<NullCheck>> NullCheck [<<LoopPhi:l\d+>>] klass:int[] + /// CHECK-DAG: <<LoopPhi>> Phi [<<Null>>,<<MergePhi:l\d+>>] klass:int[] + + /// CHECK-DAG: <<BoundType:l\d+>> BoundType [<<LoopPhi>>] klass:int[] can_be_null:false + /// CHECK-DAG: <<NewArray10:l\d+>> NewArray [<<Cst10>>,<<Method>>] klass:int[] + /// CHECK-DAG: <<NotNullPhi:l\d+>> Phi [<<BoundType>>,<<NewArray10>>] klass:int[] + + /// CHECK-DAG: <<NewArray5:l\d+>> NewArray [<<Cst5>>,<<Method>>] klass:int[] + /// CHECK-DAG: <<MergePhi>> Phi [<<NewArray5>>,<<NotNullPhi>>] klass:int[] + + public static void boundTypeForIfNotNull() { + int[] array = null; + for (int i = -1; i < 10; ++i) { + if (array == null) { + array = new int[5]; + } else { + if (i == 5) { + array = new int[10]; + } + array[i] = i; + } + } + array.hashCode(); + } + + public static void main(String[] args) { } +} diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk index 81cfb7003a..010d0708c2 100644 --- a/test/Android.run-test.mk +++ b/test/Android.run-test.mk @@ -222,6 +222,7 @@ ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES), # Tests that are timing sensitive and flaky on heavily loaded systems. TEST_ART_TIMING_SENSITIVE_RUN_TESTS := \ + 002-sleep \ 053-wait-some \ 055-enum-performance \ 133-static-invoke-super @@ -266,6 +267,26 @@ endif TEST_ART_BROKEN_PREBUILD_RUN_TESTS := +# b/26483935 +TEST_ART_BROKEN_HOST_RUN_TESTS := \ + 132-daemon-locks-shutdown \ + +ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,host,$(RUN_TYPES),$(PREBUILD_TYPES), \ + $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ + $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_HOST_RUN_TESTS), $(ALL_ADDRESS_SIZES)) + +TEST_ART_BROKEN_HOST_RUN_TESTS := + +# 143-string-value tests for a LOG(E) tag, which is only supported on host. 
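+# (As with the host list above, the list is folded into ART_TEST_KNOWN_BROKEN +# below and then cleared.)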
+TEST_ART_BROKEN_TARGET_RUN_TESTS := \ + 143-string-value \ + +ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(RUN_TYPES),$(PREBUILD_TYPES), \ + $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ + $(IMAGE_TYPES), $(PICTEST_TYPES), $(DEBUGGABLE_TYPES), $(TEST_ART_BROKEN_TARGET_RUN_TESTS), $(ALL_ADDRESS_SIZES)) + +TEST_ART_BROKEN_TARGET_RUN_TESTS := + # 554-jit-profile-file is disabled because it needs a primary oat file to know what it should save. TEST_ART_BROKEN_NO_PREBUILD_TESTS := \ 117-nopatchoat \ @@ -587,8 +608,7 @@ endif TEST_ART_BROKEN_DEFAULT_HEAP_POISONING_RUN_TESTS := TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS := - -# Tests broken by multi-image. +# Tests broken by multi-image. TEST_ART_BROKEN_MULTI_IMAGE_RUN_TESTS := \ 476-checker-ctor-memory-barrier \ 530-checker-lse diff --git a/test/etc/run-test-jar b/test/etc/run-test-jar index dacb7b9d3b..e004b6cff4 100755 --- a/test/etc/run-test-jar +++ b/test/etc/run-test-jar @@ -361,9 +361,7 @@ fi dex2oat_cmdline="true" mkdir_cmdline="mkdir -p ${DEX_LOCATION}/dalvik-cache/$ISA" -# TODO: allow app-image to work with multi-image. b/26317072 -app_image="" -# app_image="--app-image-file=$DEX_LOCATION/dalvik-cache/$ISA/$(echo $DEX_LOCATION/$TEST_NAME.jar/classes.art | cut -d/ -f 2- | sed "s:/:@:g")" +app_image="--app-image-file=$DEX_LOCATION/dalvik-cache/$ISA/$(echo $DEX_LOCATION/$TEST_NAME.jar/classes.art | cut -d/ -f 2- | sed "s:/:@:g")" if [ "$PREBUILD" = "y" ]; then dex2oat_cmdline="$INVOKE_WITH $ANDROID_ROOT/bin/dex2oatd \ diff --git a/test/run-test b/test/run-test index d07668726b..4f111d2dcf 100755 --- a/test/run-test +++ b/test/run-test @@ -462,7 +462,7 @@ fi if [ "$runtime" = "dalvik" ]; then if [ "$target_mode" = "no" ]; then framework="${ANDROID_PRODUCT_OUT}/system/framework" - bpath="${framework}/core-libart.jar:${framework}/core-oj.jar:${framework}/conscrypt.jar:${framework}/okhttp.jar:${framework}/core-junit.jar:${framework}/bouncycastle.jar:${framework}/ext.jar" + bpath="${framework}/core-libart.jar:${framework}/core-oj.jar:${framework}/conscrypt.jar:${framework}/okhttp.jar:${framework}/bouncycastle.jar:${framework}/ext.jar" run_args="${run_args} --boot -Xbootclasspath:${bpath}" else true # defaults to using target BOOTCLASSPATH @@ -509,7 +509,6 @@ if [ "$have_image" = "no" ]; then bpath="${bpath}:${framework}/core-oj${bpath_suffix}.jar" bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar" bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar" - bpath="${bpath}:${framework}/core-junit${bpath_suffix}.jar" bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar" # Pass down the bootclasspath run_args="${run_args} --runtime-option -Xbootclasspath:${bpath}" @@ -684,11 +683,6 @@ function arch_supports_read_barrier() { # Tests named '<number>-checker-*' will also have their CFGs verified with # Checker when compiled with Optimizing on host. if [[ "$TEST_NAME" =~ ^[0-9]+-checker- ]]; then - # Build Checker DEX files without dx's optimizations so the input to dex2oat - # better resembles the Java source. We always build the DEX the same way, even - # if Checker is not invoked and the test only runs the program. - build_args="${build_args} --dx-option --no-optimize" - # Jack does not necessarily generate the same DEX output as dx. Because these tests depend # on a particular DEX output, keep building them with dx for now (b/19467889). 
USE_JACK="false" diff --git a/tools/ahat/README.txt b/tools/ahat/README.txt index adc4d03a7a..a3ecf86ab7 100644 --- a/tools/ahat/README.txt +++ b/tools/ahat/README.txt @@ -23,8 +23,6 @@ TODO: - Make sortable by clicking on headers. * For HeapTable with single heap shown, the heap name isn't centered? * Consistently document functions. - * Should help be part of an AhatHandler, that automatically gets the menu and - stylesheet link rather than duplicating that? * Show version number with --version. * Show somewhere where to send bugs. * Include a link to /objects in the overview and menu? @@ -79,6 +77,12 @@ Things to move to perflib: * Instance.isRoot and Instance.getRootTypes. Release History: + 0.3 Dec 15, 2015 + Fix page loading performance by showing a limited number of entries by default. + Fix mismatch between overview and "roots" totals. + Annotate root objects and show their types. + Annotate references with their referents. + 0.2 Oct 20, 2015 Take into account 'count' and 'offset' when displaying strings. diff --git a/tools/ahat/src/AhatHttpHandler.java b/tools/ahat/src/AhatHttpHandler.java index 178747c29a..1d05a66653 100644 --- a/tools/ahat/src/AhatHttpHandler.java +++ b/tools/ahat/src/AhatHttpHandler.java @@ -41,15 +41,7 @@ class AhatHttpHandler implements HttpHandler { PrintStream ps = new PrintStream(exchange.getResponseBody()); try { HtmlDoc doc = new HtmlDoc(ps, DocString.text("ahat"), DocString.uri("style.css")); - DocString menu = new DocString(); - menu.appendLink(DocString.uri("/"), DocString.text("overview")); - menu.append(" - "); - menu.appendLink(DocString.uri("rooted"), DocString.text("rooted")); - menu.append(" - "); - menu.appendLink(DocString.uri("sites"), DocString.text("allocations")); - menu.append(" - "); - menu.appendLink(DocString.uri("help"), DocString.text("help")); - doc.menu(menu); + doc.menu(Menu.getMenu()); mAhatHandler.handle(doc, new Query(exchange.getRequestURI())); doc.close(); } catch (RuntimeException e) { diff --git a/tools/ahat/src/HelpHandler.java b/tools/ahat/src/HelpHandler.java new file mode 100644 index 0000000000..8de3c85f5c --- /dev/null +++ b/tools/ahat/src/HelpHandler.java @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.android.ahat; + +import com.google.common.io.ByteStreams; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import java.io.IOException; +import java.io.InputStream; +import java.io.PrintStream; + +/** + * HelpHandler. + * + * HttpHandler to show the help page. 
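+ * + * Unlike the StaticHandler registration it replaces in Main.java, this + * handler wraps the static help.html resource with the shared menu and + * stylesheet.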
+ */ +class HelpHandler implements HttpHandler { + + @Override + public void handle(HttpExchange exchange) throws IOException { + ClassLoader loader = HelpHandler.class.getClassLoader(); + exchange.getResponseHeaders().add("Content-Type", "text/html;charset=utf-8"); + exchange.sendResponseHeaders(200, 0); + PrintStream ps = new PrintStream(exchange.getResponseBody()); + HtmlDoc doc = new HtmlDoc(ps, DocString.text("ahat"), DocString.uri("style.css")); + doc.menu(Menu.getMenu()); + + InputStream is = loader.getResourceAsStream("help.html"); + if (is == null) { + ps.println("No help available."); + } else { + ByteStreams.copy(is, ps); + } + + doc.close(); + ps.close(); + } +} diff --git a/tools/ahat/src/Main.java b/tools/ahat/src/Main.java index ebd49d7e2c..091820f7fc 100644 --- a/tools/ahat/src/Main.java +++ b/tools/ahat/src/Main.java @@ -79,7 +79,7 @@ public class Main { server.createContext("/objects", new AhatHttpHandler(new ObjectsHandler(ahat))); server.createContext("/site", new AhatHttpHandler(new SiteHandler(ahat))); server.createContext("/bitmap", new BitmapHandler(ahat)); - server.createContext("/help", new StaticHandler("help.html", "text/html")); + server.createContext("/help", new HelpHandler()); server.createContext("/style.css", new StaticHandler("style.css", "text/css")); server.setExecutor(Executors.newFixedThreadPool(1)); System.out.println("Server started on localhost:" + port); diff --git a/tools/ahat/src/Menu.java b/tools/ahat/src/Menu.java new file mode 100644 index 0000000000..018e019503 --- /dev/null +++ b/tools/ahat/src/Menu.java @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2015 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package com.android.ahat; + +/** + * A menu shown in the UI that can be used to jump to common pages. + */ +class Menu { + private static DocString mMenu = + DocString.link(DocString.uri("/"), DocString.text("overview")) + .append(" - ") + .appendLink(DocString.uri("rooted"), DocString.text("rooted")) + .append(" - ") + .appendLink(DocString.uri("sites"), DocString.text("allocations")) + .append(" - ") + .appendLink(DocString.uri("help"), DocString.text("help")); + + /** + * Returns the menu as a DocString. 
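+ * The same DocString instance is returned on every call, so callers are + * expected not to modify it.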
+ */ + public static DocString getMenu() { + return mMenu; + } +} diff --git a/tools/ahat/src/OverviewHandler.java b/tools/ahat/src/OverviewHandler.java index 0fe4fba716..720fcb42ff 100644 --- a/tools/ahat/src/OverviewHandler.java +++ b/tools/ahat/src/OverviewHandler.java @@ -48,14 +48,7 @@ class OverviewHandler implements AhatHandler { doc.section("Heap Sizes"); printHeapSizes(doc, query); - - DocString menu = new DocString(); - menu.appendLink(DocString.uri("rooted"), DocString.text("Rooted")); - menu.append(" - "); - menu.appendLink(DocString.uri("site"), DocString.text("Allocations")); - menu.append(" - "); - menu.appendLink(DocString.uri("help"), DocString.text("Help")); - doc.big(menu); + doc.big(Menu.getMenu()); } private void printHeapSizes(Doc doc, Query query) { diff --git a/tools/ahat/src/help.html b/tools/ahat/src/help.html index 92ec37d984..ff04ad2840 100644 --- a/tools/ahat/src/help.html +++ b/tools/ahat/src/help.html @@ -14,17 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. --> -<head> -<link rel="stylesheet" type="text/css" href="style.css"> -</head> - -<div class="menu"> - <a href="/">overview</a> - - <a href="rooted">rooted</a> - - <a href="sites">allocations</a> - - <a href="help">help</a> -</div> - <h1>Help</h1> <h2>Information shown by ahat:</h2> <ul> diff --git a/tools/ahat/src/manifest.txt b/tools/ahat/src/manifest.txt index 421de1715a..368b744f28 100644 --- a/tools/ahat/src/manifest.txt +++ b/tools/ahat/src/manifest.txt @@ -1,4 +1,4 @@ Name: ahat/ Implementation-Title: ahat -Implementation-Version: 0.3 +Implementation-Version: 0.4 Main-Class: com.android.ahat.Main diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh index 02787fba43..9e02ce2f90 100755 --- a/tools/buildbot-build.sh +++ b/tools/buildbot-build.sh @@ -48,7 +48,7 @@ done if [[ $mode == "host" ]]; then make_command="make $j_arg $showcommands build-art-host-tests $common_targets ${out_dir}/host/linux-x86/lib/libjavacoretests.so ${out_dir}/host/linux-x86/lib64/libjavacoretests.so" elif [[ $mode == "target" ]]; then - make_command="make $j_arg $showcommands build-art-target-tests $common_targets libjavacrypto libjavacoretests linker toybox toolbox sh ${out_dir}/host/linux-x86/bin/adb" + make_command="make $j_arg $showcommands build-art-target-tests $common_targets libjavacrypto libjavacoretests linker toybox toolbox sh ${out_dir}/host/linux-x86/bin/adb libstdc++" fi echo "Executing $make_command" diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt index 880be26792..6e1ec49b40 100644 --- a/tools/libcore_failures.txt +++ b/tools/libcore_failures.txt @@ -173,7 +173,8 @@ { description: "Assertion failing on the concurrent collector configuration.", result: EXEC_FAILED, - names: ["jsr166.LinkedTransferQueueTest#testTransfer2"], + names: ["jsr166.LinkedTransferQueueTest#testTransfer2", + "jsr166.LinkedTransferQueueTest#testWaitingConsumer"], bug: 25883050 }, { @@ -183,7 +184,6 @@ names: ["libcore.icu.RelativeDateTimeFormatterTest#test_getRelativeDateTimeStringDST", "libcore.java.lang.OldSystemTest#test_load", "libcore.java.text.NumberFormatTest#test_currencyWithPatternDigits", - "libcore.java.text.NumberFormatTest#test_customCurrencySymbol", "libcore.java.text.NumberFormatTest#test_setCurrency", "libcore.java.text.OldNumberFormatTest#test_getIntegerInstanceLjava_util_Locale", "libcore.java.util.CalendarTest#testAddOneDayAndOneDayOver30MinuteDstForwardAdds48Hours", @@ -192,8 +192,34 @@ 
"libcore.java.util.CalendarTest#test_nullLocale", "libcore.java.util.FormatterTest#test_numberLocalization", "libcore.java.util.FormatterTest#test_uppercaseConversions", - "libcore.java.util.TimeZoneTest#testTimeZoneIDLocalization", - "libcore.java.util.prefs.OldAbstractPreferencesTest#testClear", + "libcore.javax.crypto.CipherTest#testCipher_getInstance_WrongType_Failure", + "libcore.javax.crypto.CipherTest#testDecryptBufferZeroSize_mustDecodeToEmptyString", + "libcore.javax.security.auth.x500.X500PrincipalTest#testExceptionsForWrongDNs", + "org.apache.harmony.luni.tests.java.net.URLConnectionTest#test_getDate", + "org.apache.harmony.luni.tests.java.net.URLConnectionTest#test_getExpiration", + "org.apache.harmony.regex.tests.java.util.regex.PatternSyntaxExceptionTest#testPatternSyntaxException", + "org.apache.harmony.tests.java.lang.FloatTest#test_parseFloat_LString_Harmony6261", + "org.apache.harmony.tests.java.lang.ThreadTest#test_isDaemon", + "org.apache.harmony.tests.java.text.DecimalFormatSymbolsTest#test_setInternationalCurrencySymbolLjava_lang_String", + "org.apache.harmony.tests.java.text.DecimalFormatTest#testSerializationHarmonyRICompatible", + "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parseLjava_lang_StringLjava_text_ParsePosition", + "org.apache.harmony.tests.java.util.jar.JarFileTest#test_getInputStreamLjava_util_jar_JarEntry_subtest0", + "libcore.java.util.CalendarTest#test_clear_45877", + "org.apache.harmony.crypto.tests.javax.crypto.spec.SecretKeySpecTest#testGetFormat", + "org.apache.harmony.tests.java.util.TimerTaskTest#test_scheduledExecutionTime"] +}, +{ + description: "'cat -' does not work anymore", + result: EXEC_FAILED, + bug: 26395656, + modes: [device], + names: ["org.apache.harmony.tests.java.lang.ProcessTest#test_getOutputStream"] +}, +{ + description: "Missing resource in classpath", + result: EXEC_FAILED, + modes: [device], + names: ["libcore.java.util.prefs.OldAbstractPreferencesTest#testClear", "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportNode", "libcore.java.util.prefs.OldAbstractPreferencesTest#testExportSubtree", "libcore.java.util.prefs.OldAbstractPreferencesTest#testGet", @@ -217,37 +243,11 @@ "libcore.java.util.prefs.OldAbstractPreferencesTest#testSync", "libcore.java.util.prefs.PreferencesTest#testHtmlEncoding", "libcore.java.util.prefs.PreferencesTest#testPreferencesClobbersExistingFiles", - "libcore.javax.crypto.CipherTest#testCipher_getInstance_WrongType_Failure", - "libcore.javax.crypto.CipherTest#testDecryptBufferZeroSize_mustDecodeToEmptyString", - "libcore.javax.security.auth.x500.X500PrincipalTest#testExceptionsForWrongDNs", - "org.apache.harmony.luni.tests.java.net.URLConnectionTest#test_getDate", - "org.apache.harmony.luni.tests.java.net.URLConnectionTest#test_getExpiration", - "org.apache.harmony.regex.tests.java.util.regex.PatternSyntaxExceptionTest#testPatternSyntaxException", - "org.apache.harmony.tests.java.lang.Character_UnicodeBlockTest#test_forNameLjava_lang_StringExceptions", - "org.apache.harmony.tests.java.lang.FloatTest#test_parseFloat_LString_Harmony6261", - "org.apache.harmony.tests.java.lang.ThreadTest#test_isDaemon", - "org.apache.harmony.tests.java.text.DecimalFormatSymbolsTest#test_setInternationalCurrencySymbolLjava_lang_String", - "org.apache.harmony.tests.java.text.DecimalFormatTest#testSerializationHarmonyRICompatible", - "org.apache.harmony.tests.java.text.SimpleDateFormatTest#test_parseLjava_lang_StringLjava_text_ParsePosition", - 
"org.apache.harmony.tests.java.util.CalendarTest#test_getDisplayNamesIILjava_util_Locale", "org.apache.harmony.tests.java.util.PropertiesTest#test_storeToXMLLjava_io_OutputStreamLjava_lang_StringLjava_lang_String", - "org.apache.harmony.tests.java.util.jar.JarFileTest#test_getInputStreamLjava_util_jar_JarEntry_subtest0", "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportNode", "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testExportSubtree", "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testFlush", "org.apache.harmony.tests.java.util.prefs.AbstractPreferencesTest#testSync", - "org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet", - "libcore.java.util.CalendarTest#test_clear_45877", - "org.apache.harmony.crypto.tests.javax.crypto.spec.SecretKeySpecTest#testGetFormat", - "org.apache.harmony.tests.java.util.TimerTaskTest#test_scheduledExecutionTime"] -}, -{ - description: "Failing tests after enso move, only on arm32", - result: EXEC_FAILED, - bug: 26353151, - modes_variants: [[device, X32]], - names: ["org.apache.harmony.tests.java.text.DecimalFormatTest#test_formatDouble_withFieldPosition", - "org.apache.harmony.tests.java.text.DecimalFormatTest#test_formatToCharacterIterator_original"] + "org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet"] } - ] |