137 files changed, 2631 insertions, 1099 deletions
diff --git a/build/Android.gtest.mk b/build/Android.gtest.mk index cd463ecc7c..3b459c3ad1 100644 --- a/build/Android.gtest.mk +++ b/build/Android.gtest.mk @@ -650,11 +650,11 @@ endef # define-art-gtest ifeq ($(ART_BUILD_TARGET),true) $(foreach file,$(RUNTIME_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),,libbacktrace))) - $(foreach file,$(COMPILER_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),art/compiler,libartd-compiler libbacktrace))) + $(foreach file,$(COMPILER_GTEST_TARGET_SRC_FILES), $(eval $(call define-art-gtest,target,$(file),art/compiler,libartd-compiler libbacktrace libnativeloader))) endif ifeq ($(ART_BUILD_HOST),true) $(foreach file,$(RUNTIME_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),,libbacktrace))) - $(foreach file,$(COMPILER_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),art/compiler,libartd-compiler libbacktrace))) + $(foreach file,$(COMPILER_GTEST_HOST_SRC_FILES), $(eval $(call define-art-gtest,host,$(file),art/compiler,libartd-compiler libbacktrace libnativeloader))) endif # Used outside the art project to get a list of the current tests diff --git a/cmdline/cmdline_parser_test.cc b/cmdline/cmdline_parser_test.cc index 81b854e9c3..7c53e01c47 100644 --- a/cmdline/cmdline_parser_test.cc +++ b/cmdline/cmdline_parser_test.cc @@ -461,8 +461,8 @@ TEST_F(CmdlineParserTest, TestJitOptions) { * Test successes */ { - EXPECT_SINGLE_PARSE_VALUE(true, "-Xusejit:true", M::UseJIT); - EXPECT_SINGLE_PARSE_VALUE(false, "-Xusejit:false", M::UseJIT); + EXPECT_SINGLE_PARSE_VALUE(true, "-Xusejit:true", M::UseJitCompilation); + EXPECT_SINGLE_PARSE_VALUE(false, "-Xusejit:false", M::UseJitCompilation); } { EXPECT_SINGLE_PARSE_VALUE( diff --git a/cmdline/cmdline_types.h b/cmdline/cmdline_types.h index c0a00cce70..4797540c35 100644 --- a/cmdline/cmdline_types.h +++ b/cmdline/cmdline_types.h @@ -620,6 +620,8 @@ struct CmdlineType<LogVerbosity> : CmdlineTypeParser<LogVerbosity> { log_verbosity.verifier = true; } else if (verbose_options[j] == "image") { log_verbosity.image = true; + } else if (verbose_options[j] == "systrace-locks") { + log_verbosity.systrace_lock_logging = true; } else { return Result::Usage(std::string("Unknown -verbose option ") + verbose_options[j]); } diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc index f75a252df2..bf29e1c31d 100644 --- a/compiler/common_compiler_test.cc +++ b/compiler/common_compiler_test.cc @@ -180,6 +180,7 @@ void CommonCompilerTest::CreateCompilerDriver(Compiler::Kind kind, isa, instruction_set_features_.get(), /* boot_image */ true, + /* app_image */ false, GetImageClasses(), GetCompiledClasses(), GetCompiledMethods(), diff --git a/compiler/dex/verification_results.cc b/compiler/dex/verification_results.cc index 1491a183f1..606302bd78 100644 --- a/compiler/dex/verification_results.cc +++ b/compiler/dex/verification_results.cc @@ -60,7 +60,7 @@ void VerificationResults::ProcessVerifiedMethod(verifier::MethodVerifier* method // TODO: Investigate why are we doing the work again for this method and try to avoid it. 
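(Aside on the cmdline_types.h hunk above: it wires a new "systrace-locks" token into -verbose. A minimal stand-alone sketch of that parsing style, using illustrative names rather than the real ART types:

    #include <string>

    struct LogVerbosity {
      bool image = false;
      bool systrace_lock_logging = false;  // set by -verbose:systrace-locks
      bool verifier = false;
    };

    // Returns false for unknown tokens so the caller can report a usage error,
    // mirroring the Result::Usage(...) path in CmdlineType<LogVerbosity>.
    bool ParseVerboseToken(const std::string& token, LogVerbosity* v) {
      if (token == "image")          { v->image = true; return true; }
      if (token == "systrace-locks") { v->systrace_lock_logging = true; return true; }
      if (token == "verifier")       { v->verifier = true; return true; }
      return false;
    }

End of aside; the verification_results.cc hunk continues.)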
LOG(WARNING) << "Method processed more than once: " << PrettyMethod(ref.dex_method_index, *ref.dex_file); - if (!Runtime::Current()->UseJit()) { + if (!Runtime::Current()->UseJitCompilation()) { DCHECK_EQ(it->second->GetDevirtMap().size(), verified_method->GetDevirtMap().size()); DCHECK_EQ(it->second->GetSafeCastSet().size(), verified_method->GetSafeCastSet().size()); } diff --git a/compiler/dex/verified_method.cc b/compiler/dex/verified_method.cc index 5c0253c29e..bace014713 100644 --- a/compiler/dex/verified_method.cc +++ b/compiler/dex/verified_method.cc @@ -54,7 +54,8 @@ const VerifiedMethod* VerifiedMethod::Create(verifier::MethodVerifier* method_ve } // Only need dequicken info for JIT so far. - if (Runtime::Current()->UseJit() && !verified_method->GenerateDequickenMap(method_verifier)) { + if (Runtime::Current()->UseJitCompilation() && + !verified_method->GenerateDequickenMap(method_verifier)) { return nullptr; } } @@ -72,7 +73,7 @@ const MethodReference* VerifiedMethod::GetDevirtTarget(uint32_t dex_pc) const { } const DexFileReference* VerifiedMethod::GetDequickenIndex(uint32_t dex_pc) const { - DCHECK(Runtime::Current()->UseJit()); + DCHECK(Runtime::Current()->UseJitCompilation()); auto it = dequicken_map_.find(dex_pc); return (it != dequicken_map_.end()) ? &it->second : nullptr; } diff --git a/compiler/driver/compiled_method_storage_test.cc b/compiler/driver/compiled_method_storage_test.cc index 9e0c22c68c..6863f42d11 100644 --- a/compiler/driver/compiled_method_storage_test.cc +++ b/compiler/driver/compiled_method_storage_test.cc @@ -36,6 +36,7 @@ TEST(CompiledMethodStorage, Deduplicate) { /* instruction_set_ */ kNone, /* instruction_set_features */ nullptr, /* boot_image */ false, + /* app_image */ false, /* image_classes */ nullptr, /* compiled_classes */ nullptr, /* compiled_methods */ nullptr, diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index be82956e76..1ab1d31f09 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -341,6 +341,7 @@ CompilerDriver::CompilerDriver( InstructionSet instruction_set, const InstructionSetFeatures* instruction_set_features, bool boot_image, + bool app_image, std::unordered_set<std::string>* image_classes, std::unordered_set<std::string>* compiled_classes, std::unordered_set<std::string>* compiled_methods, @@ -363,6 +364,7 @@ CompilerDriver::CompilerDriver( compiled_methods_(MethodTable::key_compare()), non_relative_linker_patch_count_(0u), boot_image_(boot_image), + app_image_(app_image), image_classes_(image_classes), classes_to_compile_(compiled_classes), methods_to_compile_(compiled_methods), @@ -473,7 +475,7 @@ static optimizer::DexToDexCompilationLevel GetDexToDexCompilationLevel( const DexFile& dex_file, const DexFile::ClassDef& class_def) SHARED_REQUIRES(Locks::mutator_lock_) { auto* const runtime = Runtime::Current(); - if (runtime->UseJit() || driver.GetCompilerOptions().VerifyAtRuntime()) { + if (runtime->UseJitCompilation() || driver.GetCompilerOptions().VerifyAtRuntime()) { // Verify at runtime shouldn't dex to dex since we didn't resolve of verify. 
return optimizer::DexToDexCompilationLevel::kDontDexToDexCompile; } @@ -945,7 +947,7 @@ bool CompilerDriver::ShouldVerifyClassBasedOnProfile(const DexFile& dex_file, class ResolveCatchBlockExceptionsClassVisitor : public ClassVisitor { public: - ResolveCatchBlockExceptionsClassVisitor( + explicit ResolveCatchBlockExceptionsClassVisitor( std::set<std::pair<uint16_t, const DexFile*>>& exceptions_to_resolve) : exceptions_to_resolve_(exceptions_to_resolve) {} @@ -1268,7 +1270,7 @@ void CompilerDriver::UpdateImageClasses(TimingLogger* timings) { bool CompilerDriver::CanAssumeClassIsLoaded(mirror::Class* klass) { Runtime* runtime = Runtime::Current(); if (!runtime->IsAotCompiler()) { - DCHECK(runtime->UseJit()); + DCHECK(runtime->UseJitCompilation()); // Having the klass reference here implies that the klass is already loaded. return true; } @@ -1289,7 +1291,7 @@ bool CompilerDriver::CanAssumeTypeIsPresentInDexCache(Handle<mirror::DexCache> d if ((IsBootImage() && IsImageClass(dex_cache->GetDexFile()->StringDataByIdx( dex_cache->GetDexFile()->GetTypeId(type_idx).descriptor_idx_))) || - Runtime::Current()->UseJit()) { + Runtime::Current()->UseJitCompilation()) { mirror::Class* resolved_class = dex_cache->GetResolvedType(type_idx); result = (resolved_class != nullptr); } @@ -1307,7 +1309,7 @@ bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, // See also Compiler::ResolveDexFile bool result = false; - if (IsBootImage() || Runtime::Current()->UseJit()) { + if (IsBootImage() || Runtime::Current()->UseJitCompilation()) { ScopedObjectAccess soa(Thread::Current()); StackHandleScope<1> hs(soa.Self()); ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); @@ -1319,7 +1321,7 @@ bool CompilerDriver::CanAssumeStringIsPresentInDexCache(const DexFile& dex_file, result = true; } else { // Just check whether the dex cache already has the string. - DCHECK(Runtime::Current()->UseJit()); + DCHECK(Runtime::Current()->UseJitCompilation()); result = (dex_cache->GetResolvedString(string_idx) != nullptr); } } @@ -1427,7 +1429,7 @@ bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_i } else { return false; } - } else if (runtime->UseJit() && !heap->IsMovableObject(resolved_class)) { + } else if (runtime->UseJitCompilation() && !heap->IsMovableObject(resolved_class)) { *is_type_initialized = resolved_class->IsInitialized(); // If the class may move around, then don't embed it as a direct pointer. *use_direct_type_ptr = true; @@ -1604,7 +1606,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType } } } - if (runtime->UseJit()) { + if (runtime->UseJitCompilation()) { // If we are the JIT, then don't allow a direct call to the interpreter bridge since this will // never be updated even after we compile the method. 
if (cl->IsQuickToInterpreterBridge( @@ -1636,7 +1638,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType bool must_use_direct_pointers = false; mirror::DexCache* dex_cache = declaring_class->GetDexCache(); if (target_method->dex_file == dex_cache->GetDexFile() && - !(runtime->UseJit() && dex_cache->GetResolvedMethod( + !(runtime->UseJitCompilation() && dex_cache->GetResolvedMethod( method->GetDexMethodIndex(), pointer_size) == nullptr)) { target_method->dex_method_index = method->GetDexMethodIndex(); } else { @@ -1673,7 +1675,7 @@ void CompilerDriver::GetCodeAndMethodForDirectCall(InvokeType* type, InvokeType break; } } - if (method_in_image || compiling_boot || runtime->UseJit()) { + if (method_in_image || compiling_boot || runtime->UseJitCompilation()) { // We know we must be able to get to the method in the image, so use that pointer. // In the case where we are the JIT, we can always use direct pointers since we know where // the method and its code are / will be. We don't sharpen to interpreter bridge since we @@ -2440,9 +2442,12 @@ void CompilerDriver::InitializeClasses(jobject jni_class_loader, context.ForAll(0, dex_file.NumClassDefs(), &visitor, init_thread_count); } -class InitializeArrayClassVisitor : public ClassVisitor { +class InitializeArrayClassesAndCreateConflictTablesVisitor : public ClassVisitor { public: virtual bool operator()(mirror::Class* klass) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + if (Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(klass)) { + return true; + } if (klass->IsArrayClass()) { StackHandleScope<1> hs(Thread::Current()); Runtime::Current()->GetClassLinker()->EnsureInitialized(hs.Self(), @@ -2450,6 +2455,10 @@ class InitializeArrayClassVisitor : public ClassVisitor { true, true); } + // Create the conflict tables. + if (!klass->IsTemp() && klass->ShouldHaveEmbeddedImtAndVTable()) { + Runtime::Current()->GetClassLinker()->FillIMTAndConflictTables(klass); + } return true; } }; @@ -2462,13 +2471,15 @@ void CompilerDriver::InitializeClasses(jobject class_loader, CHECK(dex_file != nullptr); InitializeClasses(class_loader, *dex_file, dex_files, timings); } - { + if (boot_image_ || app_image_) { // Make sure that we call EnsureIntiailized on all the array classes to call // SetVerificationAttempted so that the access flags are set. If we do not do this they get // changed at runtime resulting in more dirty image pages. + // Also create conflict tables. + // Only useful if we are compiling an image (image_classes_ is not null). ScopedObjectAccess soa(Thread::Current()); - InitializeArrayClassVisitor visitor; - Runtime::Current()->GetClassLinker()->VisitClasses(&visitor); + InitializeArrayClassesAndCreateConflictTablesVisitor visitor; + Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&visitor); } if (IsBootImage()) { // Prune garbage objects created during aborted transactions. 
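(Aside on the compiler_driver.cc changes above: the renamed visitor folds two passes into one, array classes get the EnsureInitialized treatment, and non-temporary classes with embedded tables get their IMT conflict tables filled at compile time, so that work, and the dirty image pages it would cause, does not happen at runtime. A rough stand-alone model of the gating logic, with simplified types standing in for mirror::Class and the class linker:

    struct Class {
      bool in_boot_image;     // already handled when the boot image was built
      bool is_temp;           // not fully loaded yet
      bool embedded_tables;   // ShouldHaveEmbeddedImtAndVTable() analogue
    };

    void FillImtAndConflictTables(Class* klass) { /* hypothetical stand-in */ }

    // Analogue of InitializeArrayClassesAndCreateConflictTablesVisitor: skip
    // boot-image classes, fill conflict tables for the rest. Returning true
    // keeps the iteration going, as in ART's ClassVisitor contract.
    bool VisitClass(Class* klass) {
      if (klass->in_boot_image) {
        return true;
      }
      if (!klass->is_temp && klass->embedded_tables) {
        FillImtAndConflictTables(klass);
      }
      return true;
    }

Note also the switch from VisitClasses to VisitClassesWithoutClassesLock: the visitor now allocates conflict tables, which is not something to do while holding the classes lock.)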
diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index d63dffa49a..19a1ecc494 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -92,6 +92,7 @@ class CompilerDriver { InstructionSet instruction_set, const InstructionSetFeatures* instruction_set_features, bool boot_image, + bool app_image, std::unordered_set<std::string>* image_classes, std::unordered_set<std::string>* compiled_classes, std::unordered_set<std::string>* compiled_methods, @@ -652,6 +653,7 @@ class CompilerDriver { size_t non_relative_linker_patch_count_ GUARDED_BY(compiled_methods_lock_); const bool boot_image_; + const bool app_image_; // If image_ is true, specifies the classes that will be included in the image. // Note if image_classes_ is null, all classes are included in the image. diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc index 8bb462c667..00ff522c9a 100644 --- a/compiler/image_writer.cc +++ b/compiler/image_writer.cc @@ -653,8 +653,7 @@ bool ImageWriter::AllocMemory() { for (ImageInfo& image_info : image_infos_) { ImageSection unused_sections[ImageHeader::kSectionCount]; const size_t length = RoundUp( - image_info.CreateImageSections(target_ptr_size_, unused_sections), - kPageSize); + image_info.CreateImageSections(unused_sections), kPageSize); std::string error_msg; image_info.image_.reset(MemMap::MapAnonymous("image writer image", @@ -1214,6 +1213,20 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) { AssignMethodOffset(&m, type, oat_index); } (any_dirty ? dirty_methods_ : clean_methods_) += num_methods; + + // Assign offsets for all runtime methods in the IMT since these may hold conflict tables + // live. + if (as_klass->ShouldHaveEmbeddedImtAndVTable()) { + for (size_t i = 0; i < mirror::Class::kImtSize; ++i) { + ArtMethod* imt_method = as_klass->GetEmbeddedImTableEntry(i, target_ptr_size_); + DCHECK(imt_method != nullptr); + if (imt_method->IsRuntimeMethod() && + !IsInBootImage(imt_method) && + !NativeRelocationAssigned(imt_method)) { + AssignMethodOffset(imt_method, kNativeObjectRelocationTypeRuntimeMethod, oat_index); + } + } + } } } else if (h_obj->IsObjectArray()) { // Walk elements of an object array. @@ -1237,13 +1250,37 @@ void ImageWriter::WalkFieldsInOrder(mirror::Object* obj) { } } +bool ImageWriter::NativeRelocationAssigned(void* ptr) const { + return native_object_relocations_.find(ptr) != native_object_relocations_.end(); +} + +void ImageWriter::TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) { + // No offset, or already assigned. + if (table == nullptr || NativeRelocationAssigned(table)) { + return; + } + CHECK(!IsInBootImage(table)); + // If the method is a conflict method we also want to assign the conflict table offset. 
+ ImageInfo& image_info = GetImageInfo(oat_index); + const size_t size = table->ComputeSize(target_ptr_size_); + native_object_relocations_.emplace( + table, + NativeObjectRelocation { + oat_index, + image_info.bin_slot_sizes_[kBinIMTConflictTable], + kNativeObjectRelocationTypeIMTConflictTable}); + image_info.bin_slot_sizes_[kBinIMTConflictTable] += size; +} + void ImageWriter::AssignMethodOffset(ArtMethod* method, NativeObjectRelocationType type, size_t oat_index) { DCHECK(!IsInBootImage(method)); - auto it = native_object_relocations_.find(method); - CHECK(it == native_object_relocations_.end()) << "Method " << method << " already assigned " + CHECK(!NativeRelocationAssigned(method)) << "Method " << method << " already assigned " << PrettyMethod(method); + if (method->IsRuntimeMethod()) { + TryAssignConflictTableOffset(method->GetImtConflictTable(target_ptr_size_), oat_index); + } ImageInfo& image_info = GetImageInfo(oat_index); size_t& offset = image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(type)]; native_object_relocations_.emplace(method, NativeObjectRelocation { oat_index, offset, type }); @@ -1292,8 +1329,7 @@ void ImageWriter::CalculateNewObjectOffsets() { // know where image_roots is going to end up image_objects_offset_begin_ = RoundUp(sizeof(ImageHeader), kObjectAlignment); // 64-bit-alignment - // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots. - heap->VisitObjects(WalkFieldsCallback, this); + const size_t method_alignment = ArtMethod::Alignment(target_ptr_size_); // Write the image runtime methods. image_methods_[ImageHeader::kResolutionMethod] = runtime->GetResolutionMethod(); image_methods_[ImageHeader::kImtConflictMethod] = runtime->GetImtConflictMethod(); @@ -1303,31 +1339,19 @@ void ImageWriter::CalculateNewObjectOffsets() { runtime->GetCalleeSaveMethod(Runtime::kRefsOnly); image_methods_[ImageHeader::kRefsAndArgsSaveMethod] = runtime->GetCalleeSaveMethod(Runtime::kRefsAndArgs); - - // Add room for fake length prefixed array for holding the image methods. - const auto image_method_type = kNativeObjectRelocationTypeArtMethodArrayClean; - auto it = native_object_relocations_.find(&image_method_array_); - CHECK(it == native_object_relocations_.end()); - ImageInfo& default_image_info = GetImageInfo(GetDefaultOatIndex()); - size_t& offset = - default_image_info.bin_slot_sizes_[BinTypeForNativeRelocationType(image_method_type)]; - if (!compile_app_image_) { - native_object_relocations_.emplace(&image_method_array_, - NativeObjectRelocation { GetDefaultOatIndex(), offset, image_method_type }); - } - size_t method_alignment = ArtMethod::Alignment(target_ptr_size_); - const size_t array_size = LengthPrefixedArray<ArtMethod>::ComputeSize( - 0, ArtMethod::Size(target_ptr_size_), method_alignment); - CHECK_ALIGNED_PARAM(array_size, method_alignment); - offset += array_size; + // Visit image methods first to have the main runtime methods in the first image. for (auto* m : image_methods_) { CHECK(m != nullptr); CHECK(m->IsRuntimeMethod()); DCHECK_EQ(compile_app_image_, IsInBootImage(m)) << "Trampolines should be in boot image"; if (!IsInBootImage(m)) { - AssignMethodOffset(m, kNativeObjectRelocationTypeArtMethodClean, GetDefaultOatIndex()); + AssignMethodOffset(m, kNativeObjectRelocationTypeRuntimeMethod, GetDefaultOatIndex()); } } + + // Clear any pre-existing monitors which may have been in the monitor words, assign bin slots. 
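(Aside: TryAssignConflictTableOffset above sizes each table with ComputeSize(target_ptr_size_). The accessors used throughout this patch suggest the table is a flat, null-terminated sequence of (interface method, implementation method) pointer pairs; a host-pointer-size model under that assumption, not the real art::ImtConflictTable:

    #include <cstddef>

    struct ArtMethod;  // opaque here

    class ImtConflictTable {
     public:
      size_t NumEntries() const {
        size_t n = 0;
        while (entries_[2 * n] != nullptr) ++n;  // pair n = [interface, implementation]
        return n;
      }
      size_t ComputeSize() const {
        return (NumEntries() + 1) * 2 * sizeof(ArtMethod*);  // +1 for terminator pair
      }
      ArtMethod* GetInterfaceMethod(size_t i) const { return entries_[2 * i]; }
      ArtMethod* GetImplementationMethod(size_t i) const { return entries_[2 * i + 1]; }

     private:
      ArtMethod* entries_[2];  // variable length in practice
    };

The CalculateNewObjectOffsets hunk continues below.)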
+ heap->VisitObjects(WalkFieldsCallback, this); + // Calculate size of the dex cache arrays slot and prepare offsets. PrepareDexCacheArraySlots(); @@ -1346,15 +1370,22 @@ void ImageWriter::CalculateNewObjectOffsets() { for (ImageInfo& image_info : image_infos_) { size_t bin_offset = image_objects_offset_begin_; for (size_t i = 0; i != kBinSize; ++i) { + switch (i) { + case kBinArtMethodClean: + case kBinArtMethodDirty: { + bin_offset = RoundUp(bin_offset, method_alignment); + break; + } + case kBinIMTConflictTable: { + bin_offset = RoundUp(bin_offset, target_ptr_size_); + break; + } + default: { + // Normal alignment. + } + } image_info.bin_slot_offsets_[i] = bin_offset; bin_offset += image_info.bin_slot_sizes_[i]; - if (i == kBinArtField) { - static_assert(kBinArtField + 1 == kBinArtMethodClean, "Methods follow fields."); - static_assert(alignof(ArtField) == 4u, "ArtField alignment is 4."); - DCHECK_ALIGNED(bin_offset, 4u); - DCHECK(method_alignment == 4u || method_alignment == 8u); - bin_offset = RoundUp(bin_offset, method_alignment); - } } // NOTE: There may be additional padding between the bin slots and the intern table. DCHECK_EQ(image_info.image_end_, @@ -1367,9 +1398,7 @@ void ImageWriter::CalculateNewObjectOffsets() { image_info.image_begin_ = global_image_begin_ + image_offset; image_info.image_offset_ = image_offset; ImageSection unused_sections[ImageHeader::kSectionCount]; - image_info.image_size_ = RoundUp( - image_info.CreateImageSections(target_ptr_size_, unused_sections), - kPageSize); + image_info.image_size_ = RoundUp(image_info.CreateImageSections(unused_sections), kPageSize); // There should be no gaps until the next image. image_offset += image_info.image_size_; } @@ -1396,42 +1425,52 @@ void ImageWriter::CalculateNewObjectOffsets() { // Note that image_info.image_end_ is left at end of used mirror object section. } -size_t ImageWriter::ImageInfo::CreateImageSections(size_t target_ptr_size, - ImageSection* out_sections) const { +size_t ImageWriter::ImageInfo::CreateImageSections(ImageSection* out_sections) const { DCHECK(out_sections != nullptr); + + // Do not round up any sections here that are represented by the bins since it will break + // offsets. + // Objects section - auto* objects_section = &out_sections[ImageHeader::kSectionObjects]; + ImageSection* objects_section = &out_sections[ImageHeader::kSectionObjects]; *objects_section = ImageSection(0u, image_end_); - size_t cur_pos = objects_section->End(); + // Add field section. - auto* field_section = &out_sections[ImageHeader::kSectionArtFields]; - *field_section = ImageSection(cur_pos, bin_slot_sizes_[kBinArtField]); + ImageSection* field_section = &out_sections[ImageHeader::kSectionArtFields]; + *field_section = ImageSection(bin_slot_offsets_[kBinArtField], bin_slot_sizes_[kBinArtField]); CHECK_EQ(bin_slot_offsets_[kBinArtField], field_section->Offset()); - cur_pos = field_section->End(); - // Round up to the alignment the required by the method section. - cur_pos = RoundUp(cur_pos, ArtMethod::Alignment(target_ptr_size)); + // Add method section. 
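(Aside on the switch statement above: it replaces the old fall-through alignment hack with explicit per-bin alignment, method bins start on a method-alignment boundary and conflict-table bins on a pointer-size boundary. RoundUp here is the usual power-of-two alignment helper, same semantics as ART's helper of that name:

    #include <cstddef>

    // align must be a power of two.
    constexpr size_t RoundUp(size_t x, size_t align) {
      return (x + align - 1) & ~(align - 1);
    }

    static_assert(RoundUp(13, 8) == 16, "rounds up to the next boundary");
    static_assert(RoundUp(16, 8) == 16, "already-aligned offsets are unchanged");

The CreateImageSections rewrite continues below.)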
- auto* methods_section = &out_sections[ImageHeader::kSectionArtMethods]; - *methods_section = ImageSection(cur_pos, - bin_slot_sizes_[kBinArtMethodClean] + - bin_slot_sizes_[kBinArtMethodDirty]); - CHECK_EQ(bin_slot_offsets_[kBinArtMethodClean], methods_section->Offset()); - cur_pos = methods_section->End(); + ImageSection* methods_section = &out_sections[ImageHeader::kSectionArtMethods]; + *methods_section = ImageSection( + bin_slot_offsets_[kBinArtMethodClean], + bin_slot_sizes_[kBinArtMethodClean] + bin_slot_sizes_[kBinArtMethodDirty]); + + // Conflict tables section. + ImageSection* imt_conflict_tables_section = &out_sections[ImageHeader::kSectionIMTConflictTables]; + *imt_conflict_tables_section = ImageSection(bin_slot_offsets_[kBinIMTConflictTable], + bin_slot_sizes_[kBinIMTConflictTable]); + + // Runtime methods section. + ImageSection* runtime_methods_section = &out_sections[ImageHeader::kSectionRuntimeMethods]; + *runtime_methods_section = ImageSection(bin_slot_offsets_[kBinRuntimeMethod], + bin_slot_sizes_[kBinRuntimeMethod]); + // Add dex cache arrays section. - auto* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays]; - *dex_cache_arrays_section = ImageSection(cur_pos, bin_slot_sizes_[kBinDexCacheArray]); - CHECK_EQ(bin_slot_offsets_[kBinDexCacheArray], dex_cache_arrays_section->Offset()); - cur_pos = dex_cache_arrays_section->End(); + ImageSection* dex_cache_arrays_section = &out_sections[ImageHeader::kSectionDexCacheArrays]; + *dex_cache_arrays_section = ImageSection(bin_slot_offsets_[kBinDexCacheArray], + bin_slot_sizes_[kBinDexCacheArray]); + // Round up to the alignment the string table expects. See HashSet::WriteToMemory. - cur_pos = RoundUp(cur_pos, sizeof(uint64_t)); + size_t cur_pos = RoundUp(dex_cache_arrays_section->End(), sizeof(uint64_t)); // Calculate the size of the interned strings. - auto* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings]; + ImageSection* interned_strings_section = &out_sections[ImageHeader::kSectionInternedStrings]; *interned_strings_section = ImageSection(cur_pos, intern_table_bytes_); cur_pos = interned_strings_section->End(); // Round up to the alignment the class table expects. See HashSet::WriteToMemory. cur_pos = RoundUp(cur_pos, sizeof(uint64_t)); // Calculate the size of the class table section. - auto* class_table_section = &out_sections[ImageHeader::kSectionClassTable]; + ImageSection* class_table_section = &out_sections[ImageHeader::kSectionClassTable]; *class_table_section = ImageSection(cur_pos, class_table_bytes_); cur_pos = class_table_section->End(); // Image end goes right before the start of the image bitmap. @@ -1446,7 +1485,7 @@ void ImageWriter::CreateHeader(size_t oat_index) { // Create the image sections. ImageSection sections[ImageHeader::kSectionCount]; - const size_t image_end = image_info.CreateImageSections(target_ptr_size_, sections); + const size_t image_end = image_info.CreateImageSections(sections); // Finally bitmap section. 
const size_t bitmap_bytes = image_info.image_bitmap_->Size(); @@ -1531,8 +1570,20 @@ class FixupRootVisitor : public RootVisitor { ImageWriter* const image_writer_; }; +void ImageWriter::CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) { + const size_t count = orig->NumEntries(target_ptr_size_); + for (size_t i = 0; i < count; ++i) { + ArtMethod* interface_method = orig->GetInterfaceMethod(i, target_ptr_size_); + ArtMethod* implementation_method = orig->GetImplementationMethod(i, target_ptr_size_); + copy->SetInterfaceMethod(i, target_ptr_size_, NativeLocationInImage(interface_method)); + copy->SetImplementationMethod(i, + target_ptr_size_, + NativeLocationInImage(implementation_method)); + } +} + void ImageWriter::CopyAndFixupNativeData(size_t oat_index) { - ImageInfo& image_info = GetImageInfo(oat_index); + const ImageInfo& image_info = GetImageInfo(oat_index); // Copy ArtFields and methods to their locations and update the array for convenience. for (auto& pair : native_object_relocations_) { NativeObjectRelocation& relocation = pair.second; @@ -1550,6 +1601,7 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) { GetImageAddress(reinterpret_cast<ArtField*>(pair.first)->GetDeclaringClass())); break; } + case kNativeObjectRelocationTypeRuntimeMethod: case kNativeObjectRelocationTypeArtMethodClean: case kNativeObjectRelocationTypeArtMethodDirty: { CopyAndFixupMethod(reinterpret_cast<ArtMethod*>(pair.first), @@ -1575,26 +1627,22 @@ void ImageWriter::CopyAndFixupNativeData(size_t oat_index) { case kNativeObjectRelocationTypeDexCacheArray: // Nothing to copy here, everything is done in FixupDexCache(). break; + case kNativeObjectRelocationTypeIMTConflictTable: { + auto* orig_table = reinterpret_cast<ImtConflictTable*>(pair.first); + CopyAndFixupImtConflictTable( + orig_table, + new(dest)ImtConflictTable(orig_table->NumEntries(target_ptr_size_), target_ptr_size_)); + break; + } } } // Fixup the image method roots. auto* image_header = reinterpret_cast<ImageHeader*>(image_info.image_->Begin()); - const ImageSection& methods_section = image_header->GetMethodsSection(); for (size_t i = 0; i < ImageHeader::kImageMethodsCount; ++i) { ArtMethod* method = image_methods_[i]; CHECK(method != nullptr); - // Only place runtime methods in the image of the default oat file. - if (method->IsRuntimeMethod() && oat_index != GetDefaultOatIndex()) { - continue; - } if (!IsInBootImage(method)) { - auto it = native_object_relocations_.find(method); - CHECK(it != native_object_relocations_.end()) << "No forwarding for " << PrettyMethod(method); - NativeObjectRelocation& relocation = it->second; - CHECK(methods_section.Contains(relocation.offset)) << relocation.offset << " not in " - << methods_section; - CHECK(relocation.IsArtMethodRelocation()) << relocation.type; - method = reinterpret_cast<ArtMethod*>(global_image_begin_ + it->second.offset); + method = NativeLocationInImage(method); } image_header->SetImageMethod(static_cast<ImageHeader::ImageMethod>(i), method); } @@ -2057,24 +2105,28 @@ void ImageWriter::CopyAndFixupMethod(ArtMethod* orig, // The resolution method has a special trampoline to call. 
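(Aside: CopyAndFixupImtConflictTable above rewrites every pair through NativeLocationInImage, the same translation the image-method-roots loop now uses. From the way it is used in this patch, the mapping appears to be: look up the object's NativeObjectRelocation and add its offset to the image base. A sketch under that reading, illustrative only; the real version also handles null and boot-image pointers by returning them unchanged:

    #include <cstddef>
    #include <cstdint>
    #include <unordered_map>

    struct NativeObjectRelocation { size_t oat_index; size_t offset; };

    template <typename T>
    T* NativeLocationInImage(T* obj,
                             uint8_t* image_begin,
                             const std::unordered_map<void*, NativeObjectRelocation>& relocations) {
      if (obj == nullptr) return nullptr;
      auto it = relocations.find(obj);  // assumes a relocation was assigned earlier
      return reinterpret_cast<T*>(image_begin + it->second.offset);
    }

The CopyAndFixupMethod hunk continues below.)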
Runtime* runtime = Runtime::Current(); - if (UNLIKELY(orig == runtime->GetResolutionMethod())) { - copy->SetEntryPointFromQuickCompiledCodePtrSize( - GetOatAddress(kOatAddressQuickResolutionTrampoline), target_ptr_size_); - } else if (UNLIKELY(orig == runtime->GetImtConflictMethod() || - orig == runtime->GetImtUnimplementedMethod())) { - copy->SetEntryPointFromQuickCompiledCodePtrSize( - GetOatAddress(kOatAddressQuickIMTConflictTrampoline), target_ptr_size_); - } else if (UNLIKELY(orig->IsRuntimeMethod())) { - bool found_one = false; - for (size_t i = 0; i < static_cast<size_t>(Runtime::kLastCalleeSaveType); ++i) { - auto idx = static_cast<Runtime::CalleeSaveType>(i); - if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) { - found_one = true; - break; + if (orig->IsRuntimeMethod()) { + ImtConflictTable* orig_table = orig->GetImtConflictTable(target_ptr_size_); + if (orig_table != nullptr) { + // Special IMT conflict method, normal IMT conflict method or unimplemented IMT method. + copy->SetEntryPointFromQuickCompiledCodePtrSize( + GetOatAddress(kOatAddressQuickIMTConflictTrampoline), target_ptr_size_); + copy->SetImtConflictTable(NativeLocationInImage(orig_table), target_ptr_size_); + } else if (UNLIKELY(orig == runtime->GetResolutionMethod())) { + copy->SetEntryPointFromQuickCompiledCodePtrSize( + GetOatAddress(kOatAddressQuickResolutionTrampoline), target_ptr_size_); + } else { + bool found_one = false; + for (size_t i = 0; i < static_cast<size_t>(Runtime::kLastCalleeSaveType); ++i) { + auto idx = static_cast<Runtime::CalleeSaveType>(i); + if (runtime->HasCalleeSaveMethod(idx) && runtime->GetCalleeSaveMethod(idx) == orig) { + found_one = true; + break; + } } + CHECK(found_one) << "Expected to find callee save method but got " << PrettyMethod(orig); + CHECK(copy->IsRuntimeMethod()); } - CHECK(found_one) << "Expected to find callee save method but got " << PrettyMethod(orig); - CHECK(copy->IsRuntimeMethod()); } else { // We assume all methods have code. If they don't currently then we set them to the use the // resolution trampoline. Abstract methods never have code and so we need to make sure their @@ -2141,6 +2193,10 @@ ImageWriter::Bin ImageWriter::BinTypeForNativeRelocationType(NativeObjectRelocat return kBinArtMethodDirty; case kNativeObjectRelocationTypeDexCacheArray: return kBinDexCacheArray; + case kNativeObjectRelocationTypeRuntimeMethod: + return kBinRuntimeMethod; + case kNativeObjectRelocationTypeIMTConflictTable: + return kBinIMTConflictTable; } UNREACHABLE(); } @@ -2242,7 +2298,6 @@ ImageWriter::ImageWriter( compile_app_image_(compile_app_image), target_ptr_size_(InstructionSetPointerSize(compiler_driver_.GetInstructionSet())), image_infos_(oat_filenames.size()), - image_method_array_(ImageHeader::kImageMethodsCount), dirty_methods_(0u), clean_methods_(0u), image_storage_mode_(image_storage_mode), diff --git a/compiler/image_writer.h b/compiler/image_writer.h index 0cb6aea9b2..51976c511f 100644 --- a/compiler/image_writer.h +++ b/compiler/image_writer.h @@ -169,6 +169,10 @@ class ImageWriter FINAL { // ArtMethods may be dirty if the class has native methods or a declaring class that isn't // initialized. kBinArtMethodDirty, + // Conflict tables (clean). + kBinIMTConflictTable, + // Runtime methods (always clean, do not have a length prefix array). + kBinRuntimeMethod, // Dex cache arrays have a special slot for PC-relative addressing. 
Since they are // huge, and as such their dirtiness is not important for the clean/dirty separation, // we arbitrarily keep them at the end of the native data. @@ -186,6 +190,8 @@ class ImageWriter FINAL { kNativeObjectRelocationTypeArtMethodArrayClean, kNativeObjectRelocationTypeArtMethodDirty, kNativeObjectRelocationTypeArtMethodArrayDirty, + kNativeObjectRelocationTypeRuntimeMethod, + kNativeObjectRelocationTypeIMTConflictTable, kNativeObjectRelocationTypeDexCacheArray, }; friend std::ostream& operator<<(std::ostream& stream, const NativeObjectRelocationType& type); @@ -240,7 +246,7 @@ class ImageWriter FINAL { // Create the image sections into the out sections variable, returns the size of the image // excluding the bitmap. - size_t CreateImageSections(size_t target_ptr_size, ImageSection* out_sections) const; + size_t CreateImageSections(ImageSection* out_sections) const; std::unique_ptr<MemMap> image_; // Memory mapped for generating the image. @@ -395,6 +401,8 @@ class ImageWriter FINAL { void CopyAndFixupObject(mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); void CopyAndFixupMethod(ArtMethod* orig, ArtMethod* copy, const ImageInfo& image_info) SHARED_REQUIRES(Locks::mutator_lock_); + void CopyAndFixupImtConflictTable(ImtConflictTable* orig, ImtConflictTable* copy) + SHARED_REQUIRES(Locks::mutator_lock_); void FixupClass(mirror::Class* orig, mirror::Class* copy) SHARED_REQUIRES(Locks::mutator_lock_); void FixupObject(mirror::Object* orig, mirror::Object* copy) @@ -425,6 +433,11 @@ class ImageWriter FINAL { size_t oat_index) SHARED_REQUIRES(Locks::mutator_lock_); + // Assign the offset for an IMT conflict table. Does nothing if the table already has a native + // relocation. + void TryAssignConflictTableOffset(ImtConflictTable* table, size_t oat_index) + SHARED_REQUIRES(Locks::mutator_lock_); + // Return true if klass is loaded by the boot class loader but not in the boot image. bool IsBootClassLoaderNonImageClass(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_); @@ -481,6 +494,9 @@ class ImageWriter FINAL { // remove duplicates in the multi image and app image case. mirror::String* FindInternedString(mirror::String* string) SHARED_REQUIRES(Locks::mutator_lock_); + // Return true if there already exists a native allocation for an object. + bool NativeRelocationAssigned(void* ptr) const; + const CompilerDriver& compiler_driver_; // Beginning target image address for the first image. @@ -517,16 +533,14 @@ class ImageWriter FINAL { bool IsArtMethodRelocation() const { return type == kNativeObjectRelocationTypeArtMethodClean || - type == kNativeObjectRelocationTypeArtMethodDirty; + type == kNativeObjectRelocationTypeArtMethodDirty || + type == kNativeObjectRelocationTypeRuntimeMethod; } }; std::unordered_map<void*, NativeObjectRelocation> native_object_relocations_; // Runtime ArtMethods which aren't reachable from any Class but need to be copied into the image. ArtMethod* image_methods_[ImageHeader::kImageMethodsCount]; - // Fake length prefixed array for image methods. This array does not contain the actual - // ArtMethods. We only use it for the header and relocation addresses. - LengthPrefixedArray<ArtMethod> image_method_array_; // Counters for measurements, used for logging only. 
uint64_t dirty_methods_; diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc index 5de9842d90..c2d7ff7795 100644 --- a/compiler/jit/jit_compiler.cc +++ b/compiler/jit/jit_compiler.cc @@ -155,7 +155,8 @@ JitCompiler::JitCompiler() { Compiler::kOptimizing, instruction_set, instruction_set_features_.get(), - /* image */ false, + /* boot_image */ false, + /* app_image */ false, /* image_classes */ nullptr, /* compiled_classes */ nullptr, /* compiled_methods */ nullptr, diff --git a/compiler/jni/jni_compiler_test.cc b/compiler/jni/jni_compiler_test.cc index cf836a9c9f..251dc39864 100644 --- a/compiler/jni/jni_compiler_test.cc +++ b/compiler/jni/jni_compiler_test.cc @@ -31,6 +31,7 @@ #include "mirror/object_array-inl.h" #include "mirror/object-inl.h" #include "mirror/stack_trace_element.h" +#include "nativeloader/native_loader.h" #include "runtime.h" #include "ScopedLocalRef.h" #include "scoped_thread_state_change.h" @@ -53,6 +54,11 @@ class JniCompilerTest : public CommonCompilerTest { check_generic_jni_ = false; } + void TearDown() OVERRIDE { + android::ResetNativeLoader(); + CommonCompilerTest::TearDown(); + } + void SetCheckGenericJni(bool generic) { check_generic_jni_ = generic; } @@ -93,10 +99,12 @@ class JniCompilerTest : public CommonCompilerTest { // Start runtime. Thread::Current()->TransitionFromSuspendedToRunnable(); bool started = runtime_->Start(); + android::InitializeNativeLoader(); CHECK(started); } // JNI operations after runtime start. env_ = Thread::Current()->GetJniEnv(); + library_search_path_ = env_->NewStringUTF(""); jklass_ = env_->FindClass("MyClassNatives"); ASSERT_TRUE(jklass_ != nullptr) << method_name << " " << method_sig; @@ -168,6 +176,7 @@ class JniCompilerTest : public CommonCompilerTest { void StackArgsSignExtendedMips64Impl(); JNIEnv* env_; + jstring library_search_path_; jmethodID jmethod_; bool check_generic_jni_; }; @@ -220,7 +229,7 @@ void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() { std::string reason; ASSERT_TRUE(Runtime::Current()->GetJavaVM()-> - LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason)) + LoadNativeLibrary(env_, "", class_loader_, library_search_path_, &reason)) << reason; jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24); @@ -235,7 +244,7 @@ void JniCompilerTest::CompileAndRunStaticIntMethodThroughStubImpl() { std::string reason; ASSERT_TRUE(Runtime::Current()->GetJavaVM()-> - LoadNativeLibrary(env_, "", class_loader_, nullptr, &reason)) + LoadNativeLibrary(env_, "", class_loader_, library_search_path_, &reason)) << reason; jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42); diff --git a/compiler/linker/relative_patcher_test.h b/compiler/linker/relative_patcher_test.h index c07de79984..ec69107d8f 100644 --- a/compiler/linker/relative_patcher_test.h +++ b/compiler/linker/relative_patcher_test.h @@ -51,6 +51,7 @@ class RelativePatcherTest : public testing::Test { instruction_set, /* instruction_set_features*/ nullptr, /* boot_image */ false, + /* app_image */ false, /* image_classes */ nullptr, /* compiled_classes */ nullptr, /* compiled_methods */ nullptr, diff --git a/compiler/oat_test.cc b/compiler/oat_test.cc index 73b16d5b46..5b192846ba 100644 --- a/compiler/oat_test.cc +++ b/compiler/oat_test.cc @@ -112,6 +112,7 @@ class OatTest : public CommonCompilerTest { insn_set, insn_features_.get(), /* boot_image */ false, + /* app_image */ false, /* image_classes */ nullptr, /* compiled_classes */ nullptr, /* compiled_methods */ nullptr, diff --git 
a/compiler/oat_writer.cc b/compiler/oat_writer.cc index e804beef0d..8da9f06dd9 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -1127,17 +1127,23 @@ class OatWriter::WriteCodeMethodVisitor : public OatDexMethodVisitor { return target_offset; } - mirror::Class* GetTargetType(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) { - mirror::DexCache* dex_cache = (dex_file_ == patch.TargetTypeDexFile()) + mirror::DexCache* GetDexCache(const DexFile* target_dex_file) + SHARED_REQUIRES(Locks::mutator_lock_) { + return (target_dex_file == dex_file_) ? dex_cache_ - : class_linker_->FindDexCache(Thread::Current(), *patch.TargetTypeDexFile()); + : class_linker_->FindDexCache(Thread::Current(), *target_dex_file); + } + + mirror::Class* GetTargetType(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) { + mirror::DexCache* dex_cache = GetDexCache(patch.TargetTypeDexFile()); mirror::Class* type = dex_cache->GetResolvedType(patch.TargetTypeIndex()); CHECK(type != nullptr); return type; } mirror::String* GetTargetString(const LinkerPatch& patch) SHARED_REQUIRES(Locks::mutator_lock_) { - mirror::String* string = dex_cache_->GetResolvedString(patch.TargetStringIndex()); + mirror::DexCache* dex_cache = GetDexCache(patch.TargetStringDexFile()); + mirror::String* string = dex_cache->GetResolvedString(patch.TargetStringIndex()); DCHECK(string != nullptr); DCHECK(writer_->HasBootImage() || Runtime::Current()->GetHeap()->ObjectIsInBootImageSpace(string)); diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc index 659c6f8497..b65e98a120 100644 --- a/compiler/optimizing/bounds_check_elimination.cc +++ b/compiler/optimizing/bounds_check_elimination.cc @@ -552,7 +552,11 @@ class BCEVisitor : public HGraphVisitor { DCHECK(!IsAddedBlock(block)); first_index_bounds_check_map_.clear(); HGraphVisitor::VisitBasicBlock(block); - AddComparesWithDeoptimization(block); + // We should never deoptimize from an osr method, otherwise we might wrongly optimize + // code dominated by the deoptimization. + if (!GetGraph()->IsCompilingOsr()) { + AddComparesWithDeoptimization(block); + } } void Finish() { @@ -1358,6 +1362,11 @@ class BCEVisitor : public HGraphVisitor { if (loop->IsIrreducible()) { return false; } + // We should never deoptimize from an osr method, otherwise we might wrongly optimize + // code dominated by the deoptimization. + if (GetGraph()->IsCompilingOsr()) { + return false; + } // A try boundary preheader is hard to handle. // TODO: remove this restriction. if (loop->GetPreHeader()->GetLastInstruction()->IsTryBoundary()) { diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 45d23fe516..197e473473 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -5183,10 +5183,10 @@ HLoadString::LoadKind CodeGeneratorARM::GetSupportedLoadStringKind( case HLoadString::LoadKind::kBootImageAddress: break; case HLoadString::LoadKind::kDexCacheAddress: - DCHECK(Runtime::Current()->UseJit()); + DCHECK(Runtime::Current()->UseJitCompilation()); break; case HLoadString::LoadKind::kDexCachePcRelative: - DCHECK(!Runtime::Current()->UseJit()); + DCHECK(!Runtime::Current()->UseJitCompilation()); // We disable pc-relative load when there is an irreducible loop, as the optimization // is incompatible with it. 
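(Aside: several hunks in this change, bounds-check elimination above plus the graph checker and inliner below, enforce one invariant: a graph compiled for on-stack replacement must not contain HDeoptimize, because execution may enter the compiled code mid-method from the interpreter, after the point a speculative guard assumed it dominated. Where the inliner would have used a guard plus HDeoptimize, it now builds a diamond that falls back to the original call. A host-level analogy of the two shapes, with illustrative names:

    struct Klass;
    struct Object { const Klass* klass; };

    int InlinedBody(Object*);        // the devirtualized, inlined code
    int VirtualCall(Object*);        // the original invoke-virtual
    [[noreturn]] void Deoptimize();  // transfer back to the interpreter

    // Guard + deoptimization: only legal when the whole method entered
    // through the compiled entry point.
    int DispatchWithDeopt(Object* receiver, const Klass* cached) {
      if (receiver->klass != cached) Deoptimize();
      return InlinedBody(receiver);
    }

    // Diamond: OSR-safe, since a mispredicted receiver just takes the
    // original call instead of bailing out.
    int DispatchWithDiamond(Object* receiver, const Klass* cached) {
      return receiver->klass == cached ? InlinedBody(receiver)
                                       : VirtualCall(receiver);
    }

End of aside.)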
// TODO: Create as many ArmDexCacheArraysBase instructions as needed for methods diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index e8e6b68975..9680f2bf45 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -4010,10 +4010,10 @@ HLoadString::LoadKind CodeGeneratorARM64::GetSupportedLoadStringKind( case HLoadString::LoadKind::kBootImageAddress: break; case HLoadString::LoadKind::kDexCacheAddress: - DCHECK(Runtime::Current()->UseJit()); + DCHECK(Runtime::Current()->UseJitCompilation()); break; case HLoadString::LoadKind::kDexCachePcRelative: - DCHECK(!Runtime::Current()->UseJit()); + DCHECK(!Runtime::Current()->UseJitCompilation()); break; case HLoadString::LoadKind::kDexCacheViaMethod: break; diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index e73e880308..6dc480bbee 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -3303,17 +3303,6 @@ void InstructionCodeGeneratorX86::GenerateDivRemWithAnyConstant(HBinaryOperation int shift; CalculateMagicAndShiftForDivRem(imm, false /* is_long */, &magic, &shift); - NearLabel ndiv; - NearLabel end; - // If numerator is 0, the result is 0, no computation needed. - __ testl(eax, eax); - __ j(kNotEqual, &ndiv); - - __ xorl(out, out); - __ jmp(&end); - - __ Bind(&ndiv); - // Save the numerator. __ movl(num, eax); @@ -3348,7 +3337,6 @@ void InstructionCodeGeneratorX86::GenerateDivRemWithAnyConstant(HBinaryOperation } else { __ movl(eax, edx); } - __ Bind(&end); } void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instruction) { @@ -5977,7 +5965,7 @@ HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind( DCHECK(GetCompilerOptions().GetCompilePic()); FALLTHROUGH_INTENDED; case HLoadString::LoadKind::kDexCachePcRelative: - DCHECK(!Runtime::Current()->UseJit()); // Note: boot image is also non-JIT. + DCHECK(!Runtime::Current()->UseJitCompilation()); // Note: boot image is also non-JIT. // We disable pc-relative load when there is an irreducible loop, as the optimization // is incompatible with it. 
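(Aside: the deleted x86 branch above, and its x86-64 twin in the next file, special-cased a zero numerator before the magic-number division sequence. The branch was dead weight: the sequence already yields 0 for 0. A sketch of roughly what GenerateDivRemWithAnyConstant emits for a positive int32 divisor, in Hacker's Delight style; magic and shift come from CalculateMagicAndShiftForDivRem:

    #include <cstdint>

    int32_t MagicDivide(int32_t x, int32_t magic, int shift) {
      int32_t q = static_cast<int32_t>((static_cast<int64_t>(x) * magic) >> 32);
      if (magic < 0) q += x;                // correction when magic wrapped negative
      q >>= shift;
      q += static_cast<uint32_t>(q) >> 31;  // add 1 to round negatives toward zero
      return q;                             // x == 0 flows through as 0 at every step
    }

End of aside; the code_generator_x86.cc context continues.)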
// TODO: Create as many X86ComputeBaseMethodAddress instructions as needed for methods @@ -5989,7 +5977,7 @@ HLoadString::LoadKind CodeGeneratorX86::GetSupportedLoadStringKind( case HLoadString::LoadKind::kBootImageAddress: break; case HLoadString::LoadKind::kDexCacheAddress: - DCHECK(Runtime::Current()->UseJit()); + DCHECK(Runtime::Current()->UseJitCompilation()); break; case HLoadString::LoadKind::kDexCacheViaMethod: break; diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 5576d839c3..96ec09c2a8 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -3390,16 +3390,6 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat __ movl(numerator, eax); - NearLabel no_div; - NearLabel end; - __ testl(eax, eax); - __ j(kNotEqual, &no_div); - - __ xorl(out, out); - __ jmp(&end); - - __ Bind(&no_div); - __ movl(eax, Immediate(magic)); __ imull(numerator); @@ -3425,7 +3415,6 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemWithAnyConstant(HBinaryOperat } else { __ movl(eax, edx); } - __ Bind(&end); } else { int64_t imm = second.GetConstant()->AsLongConstant()->GetValue(); @@ -5413,10 +5402,10 @@ HLoadString::LoadKind CodeGeneratorX86_64::GetSupportedLoadStringKind( case HLoadString::LoadKind::kBootImageAddress: break; case HLoadString::LoadKind::kDexCacheAddress: - DCHECK(Runtime::Current()->UseJit()); + DCHECK(Runtime::Current()->UseJitCompilation()); break; case HLoadString::LoadKind::kDexCachePcRelative: - DCHECK(!Runtime::Current()->UseJit()); + DCHECK(!Runtime::Current()->UseJitCompilation()); break; case HLoadString::LoadKind::kDexCacheViaMethod: break; diff --git a/compiler/optimizing/graph_checker.cc b/compiler/optimizing/graph_checker.cc index 96837a8266..968e26724d 100644 --- a/compiler/optimizing/graph_checker.cc +++ b/compiler/optimizing/graph_checker.cc @@ -258,6 +258,15 @@ void GraphChecker::VisitBoundsCheck(HBoundsCheck* check) { VisitInstruction(check); } +void GraphChecker::VisitDeoptimize(HDeoptimize* deopt) { + if (GetGraph()->IsCompilingOsr()) { + AddError(StringPrintf("A graph compiled OSR cannot have a HDeoptimize instruction")); + } + + // Perform the instruction base checks too. + VisitInstruction(deopt); +} + void GraphChecker::VisitTryBoundary(HTryBoundary* try_boundary) { ArrayRef<HBasicBlock* const> handlers = try_boundary->GetExceptionHandlers(); diff --git a/compiler/optimizing/graph_checker.h b/compiler/optimizing/graph_checker.h index 83b198474c..3060c80073 100644 --- a/compiler/optimizing/graph_checker.h +++ b/compiler/optimizing/graph_checker.h @@ -57,6 +57,7 @@ class GraphChecker : public HGraphDelegateVisitor { void VisitCheckCast(HCheckCast* check) OVERRIDE; void VisitCondition(HCondition* op) OVERRIDE; void VisitConstant(HConstant* instruction) OVERRIDE; + void VisitDeoptimize(HDeoptimize* instruction) OVERRIDE; void VisitIf(HIf* instruction) OVERRIDE; void VisitInstanceOf(HInstanceOf* check) OVERRIDE; void VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) OVERRIDE; diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index ff4b9a765c..59de895182 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -308,7 +308,7 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) { // Check if we can use an inline cache. 
ArtMethod* caller = graph_->GetArtMethod(); - if (Runtime::Current()->UseJit()) { + if (Runtime::Current()->UseJitCompilation()) { // Under JIT, we should always know the caller. DCHECK(caller != nullptr); ScopedProfilingInfoInlineUse spiis(caller, soa.Self()); @@ -322,7 +322,13 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) { return false; } else if (ic.IsMonomorphic()) { MaybeRecordStat(kMonomorphicCall); - return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic); + if (outermost_graph_->IsCompilingOsr()) { + // If we are compiling OSR, we pretend this call is polymorphic, as we may come from the + // interpreter and it may have seen different receiver types. + return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic); + } else { + return TryInlineMonomorphicCall(invoke_instruction, resolved_method, ic); + } } else if (ic.IsPolymorphic()) { MaybeRecordStat(kPolymorphicCall); return TryInlinePolymorphicCall(invoke_instruction, resolved_method, ic); @@ -510,6 +516,11 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction, bool deoptimize = all_targets_inlined && (i != InlineCache::kIndividualCacheSize - 1) && (ic.GetTypeAt(i + 1) == nullptr); + + if (outermost_graph_->IsCompilingOsr()) { + // We do not support HDeoptimize in OSR methods. + deoptimize = false; + } HInstruction* compare = AddTypeGuard( receiver, cursor, bb_cursor, class_index, is_referrer, invoke_instruction, deoptimize); if (deoptimize) { @@ -623,7 +634,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction, ArtMethod* resolved_method, const InlineCache& ic) { // This optimization only works under JIT for now. - DCHECK(Runtime::Current()->UseJit()); + DCHECK(Runtime::Current()->UseJitCompilation()); if (graph_->GetInstructionSet() == kMips64) { // TODO: Support HClassTableGet for mips64. return false; @@ -672,7 +683,8 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction, HInstruction* cursor = invoke_instruction->GetPrevious(); HBasicBlock* bb_cursor = invoke_instruction->GetBlock(); - if (!TryInlineAndReplace(invoke_instruction, actual_method, /* do_rtp */ false)) { + HInstruction* return_replacement = nullptr; + if (!TryBuildAndInline(invoke_instruction, actual_method, &return_replacement)) { return false; } @@ -701,9 +713,6 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction, } HNotEqual* compare = new (graph_->GetArena()) HNotEqual(class_table_get, constant); - HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize( - compare, invoke_instruction->GetDexPc()); - // TODO: Extend reference type propagation to understand the guard. if (cursor != nullptr) { bb_cursor->InsertInstructionAfter(receiver_class, cursor); } else { @@ -711,8 +720,20 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(HInvoke* invoke_instruction, } bb_cursor->InsertInstructionAfter(class_table_get, receiver_class); bb_cursor->InsertInstructionAfter(compare, class_table_get); - bb_cursor->InsertInstructionAfter(deoptimize, compare); - deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment()); + + if (outermost_graph_->IsCompilingOsr()) { + CreateDiamondPatternForPolymorphicInline(compare, return_replacement, invoke_instruction); + } else { + // TODO: Extend reference type propagation to understand the guard. 
+ HDeoptimize* deoptimize = new (graph_->GetArena()) HDeoptimize( + compare, invoke_instruction->GetDexPc()); + bb_cursor->InsertInstructionAfter(deoptimize, compare); + deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment()); + if (return_replacement != nullptr) { + invoke_instruction->ReplaceWith(return_replacement); + } + invoke_instruction->GetBlock()->RemoveInstruction(invoke_instruction); + } // Run type propagation to get the guard typed. ReferenceTypePropagation rtp_fixup(graph_, @@ -744,6 +765,12 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction, HInstruction** return_replacement) { const DexFile& caller_dex_file = *caller_compilation_unit_.GetDexFile(); + if (method->IsProxyMethod()) { + VLOG(compiler) << "Method " << PrettyMethod(method) + << " is not inlined because of unimplemented inline support for proxy methods."; + return false; + } + // Check whether we're allowed to inline. The outermost compilation unit is the relevant // dex file here (though the transitivity of an inline chain would allow checking the calller). if (!compiler_driver_->MayInline(method->GetDexFile(), @@ -802,7 +829,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction, if (!method->GetDeclaringClass()->IsVerified()) { uint16_t class_def_idx = method->GetDeclaringClass()->GetDexClassDefIndex(); - if (Runtime::Current()->UseJit() || + if (Runtime::Current()->UseJitCompilation() || !compiler_driver_->IsMethodVerifiedWithoutFailures( method->GetDexMethodIndex(), class_def_idx, *method->GetDexFile())) { VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file) @@ -1265,6 +1292,8 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction, size_t HInliner::RunOptimizations(HGraph* callee_graph, const DexFile::CodeItem* code_item, const DexCompilationUnit& dex_compilation_unit) { + // Note: if the outermost_graph_ is being compiled OSR, we should not run any + // optimization that could lead to a HDeoptimize. The following optimizations do not. HDeadCodeElimination dce(callee_graph, stats_); HConstantFolding fold(callee_graph); HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_); diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h index 863dd1c6f6..39a1313ba0 100644 --- a/compiler/optimizing/intrinsics.h +++ b/compiler/optimizing/intrinsics.h @@ -30,6 +30,10 @@ class DexFile; // Temporary measure until we have caught up with the Java 7 definition of Math.round. b/26327751 static constexpr bool kRoundIsPlusPointFive = false; +// Positive floating-point infinities. +static constexpr uint32_t kPositiveInfinityFloat = 0x7f800000U; +static constexpr uint64_t kPositiveInfinityDouble = UINT64_C(0x7ff0000000000000); + // Recognize intrinsics from HInvoke nodes. 
class IntrinsicsRecognizer : public HOptimization { public: diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc index 86b7bc138c..146fea1fe0 100644 --- a/compiler/optimizing/intrinsics_arm.cc +++ b/compiler/optimizing/intrinsics_arm.cc @@ -1985,6 +1985,56 @@ void IntrinsicCodeGeneratorARM::VisitStringGetCharsNoCheck(HInvoke* invoke) { __ Bind(&done); } +void IntrinsicLocationsBuilderARM::VisitFloatIsInfinite(HInvoke* invoke) { + CreateFPToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARM::VisitFloatIsInfinite(HInvoke* invoke) { + ArmAssembler* const assembler = GetAssembler(); + LocationSummary* const locations = invoke->GetLocations(); + const Register out = locations->Out().AsRegister<Register>(); + // Shifting left by 1 bit makes the value encodable as an immediate operand; + // we don't care about the sign bit anyway. + constexpr uint32_t infinity = kPositiveInfinityFloat << 1U; + + __ vmovrs(out, locations->InAt(0).AsFpuRegister<SRegister>()); + // We don't care about the sign bit, so shift left. + __ Lsl(out, out, 1); + __ eor(out, out, ShifterOperand(infinity)); + // If the result is 0, then it has 32 leading zeros, and less than that otherwise. + __ clz(out, out); + // Any number less than 32 logically shifted right by 5 bits results in 0; + // the same operation on 32 yields 1. + __ Lsr(out, out, 5); +} + +void IntrinsicLocationsBuilderARM::VisitDoubleIsInfinite(HInvoke* invoke) { + CreateFPToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARM::VisitDoubleIsInfinite(HInvoke* invoke) { + ArmAssembler* const assembler = GetAssembler(); + LocationSummary* const locations = invoke->GetLocations(); + const Register out = locations->Out().AsRegister<Register>(); + // The highest 32 bits of double precision positive infinity separated into + // two constants encodable as immediate operands. + constexpr uint32_t infinity_high = 0x7f000000U; + constexpr uint32_t infinity_high2 = 0x00f00000U; + + static_assert((infinity_high | infinity_high2) == static_cast<uint32_t>(kPositiveInfinityDouble >> 32U), + "The constants do not add up to the high 32 bits of double precision positive infinity."); + __ vmovrrd(IP, out, FromLowSToD(locations->InAt(0).AsFpuRegisterPairLow<SRegister>())); + __ eor(out, out, ShifterOperand(infinity_high)); + __ eor(out, out, ShifterOperand(infinity_high2)); + // We don't care about the sign bit, so shift left. + __ orr(out, IP, ShifterOperand(out, LSL, 1)); + // If the result is 0, then it has 32 leading zeros, and less than that otherwise. + __ clz(out, out); + // Any number less than 32 logically shifted right by 5 bits results in 0; + // the same operation on 32 yields 1. + __ Lsr(out, out, 5); +} + UNIMPLEMENTED_INTRINSIC(ARM, IntegerBitCount) UNIMPLEMENTED_INTRINSIC(ARM, LongBitCount) UNIMPLEMENTED_INTRINSIC(ARM, MathMinDoubleDouble) @@ -2001,8 +2051,6 @@ UNIMPLEMENTED_INTRINSIC(ARM, MathRoundFloat) // Could be done by changing rou UNIMPLEMENTED_INTRINSIC(ARM, UnsafeCASLong) // High register pressure. 
UNIMPLEMENTED_INTRINSIC(ARM, SystemArrayCopyChar) UNIMPLEMENTED_INTRINSIC(ARM, ReferenceGetReferent) -UNIMPLEMENTED_INTRINSIC(ARM, FloatIsInfinite) -UNIMPLEMENTED_INTRINSIC(ARM, DoubleIsInfinite) UNIMPLEMENTED_INTRINSIC(ARM, IntegerHighestOneBit) UNIMPLEMENTED_INTRINSIC(ARM, LongHighestOneBit) UNIMPLEMENTED_INTRINSIC(ARM, IntegerLowestOneBit) diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index bf79767822..1d8229674c 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -2289,9 +2289,46 @@ void IntrinsicCodeGeneratorARM64::VisitSystemArrayCopy(HInvoke* invoke) { __ Bind(slow_path->GetExitLabel()); } +static void GenIsInfinite(LocationSummary* locations, + bool is64bit, + vixl::MacroAssembler* masm) { + Operand infinity; + Register out; + + if (is64bit) { + infinity = kPositiveInfinityDouble; + out = XRegisterFrom(locations->Out()); + } else { + infinity = kPositiveInfinityFloat; + out = WRegisterFrom(locations->Out()); + } + + const Register zero = vixl::Assembler::AppropriateZeroRegFor(out); + + MoveFPToInt(locations, is64bit, masm); + __ Eor(out, out, infinity); + // We don't care about the sign bit, so shift left. + __ Cmp(zero, Operand(out, LSL, 1)); + __ Cset(out, eq); +} + +void IntrinsicLocationsBuilderARM64::VisitFloatIsInfinite(HInvoke* invoke) { + CreateFPToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARM64::VisitFloatIsInfinite(HInvoke* invoke) { + GenIsInfinite(invoke->GetLocations(), /* is64bit */ false, GetVIXLAssembler()); +} + +void IntrinsicLocationsBuilderARM64::VisitDoubleIsInfinite(HInvoke* invoke) { + CreateFPToIntLocations(arena_, invoke); +} + +void IntrinsicCodeGeneratorARM64::VisitDoubleIsInfinite(HInvoke* invoke) { + GenIsInfinite(invoke->GetLocations(), /* is64bit */ true, GetVIXLAssembler()); +} + UNIMPLEMENTED_INTRINSIC(ARM64, ReferenceGetReferent) -UNIMPLEMENTED_INTRINSIC(ARM64, FloatIsInfinite) -UNIMPLEMENTED_INTRINSIC(ARM64, DoubleIsInfinite) UNIMPLEMENTED_INTRINSIC(ARM64, IntegerHighestOneBit) UNIMPLEMENTED_INTRINSIC(ARM64, LongHighestOneBit) UNIMPLEMENTED_INTRINSIC(ARM64, IntegerLowestOneBit) diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc index 19c6a225ac..46195c104a 100644 --- a/compiler/optimizing/intrinsics_mips.cc +++ b/compiler/optimizing/intrinsics_mips.cc @@ -2283,10 +2283,10 @@ static void GenIsInfinite(LocationSummary* locations, // If one, or more, of the exponent bits is zero, then the number can't be infinite. 
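(Aside: the ARM and ARM64 intrinsics above and the MIPS hunk continuing below all reduce isInfinite to an integer compare against the IEEE-754 infinity bit pattern, ignoring the sign bit. A portable sketch of the same test, using the kPositiveInfinityFloat/Double constants added to intrinsics.h, all exponent bits set, zero significand:

    #include <cstdint>
    #include <cstring>

    // Shifting left by one discards the sign bit, so a single equality covers
    // both +inf and -inf; NaNs differ in the significand and compare unequal.
    bool FloatIsInfinite(float value) {
      uint32_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // bit_cast
      return (bits << 1) == (UINT32_C(0x7f800000) << 1);
    }

    bool DoubleIsInfinite(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      return (bits << 1) == (UINT64_C(0x7ff0000000000000) << 1);
    }

On ARM the equality is computed branch-free as clz(x) >> 5, which maps x == 0 to 1 and every nonzero x to 0. The intrinsics_mips.cc hunk continues below.)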
if (type == Primitive::kPrimDouble) { __ MoveFromFpuHigh(TMP, in); - __ LoadConst32(AT, 0x7FF00000); + __ LoadConst32(AT, High32Bits(kPositiveInfinityDouble)); } else { __ Mfc1(TMP, in); - __ LoadConst32(AT, 0x7F800000); + __ LoadConst32(AT, kPositiveInfinityFloat); } __ Xor(TMP, TMP, AT); diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index fe75451ad2..6703695484 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -2218,6 +2218,7 @@ ReferenceTypeInfo ReferenceTypeInfo::Create(TypeHandle type_handle, bool is_exac ScopedObjectAccess soa(Thread::Current()); DCHECK(IsValidHandle(type_handle)); DCHECK(!type_handle->IsErroneous()); + DCHECK(!type_handle->IsArrayClass() || !type_handle->GetComponentType()->IsErroneous()); if (!is_exact) { DCHECK(!type_handle->CannotBeAssignedFromOtherTypes()) << "Callers of ReferenceTypeInfo::Create should ensure is_exact is properly computed"; diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index 04c9ff9d6d..f2394f605a 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -46,6 +46,13 @@ static inline ReferenceTypeInfo::TypeHandle GetRootHandle(StackHandleScopeCollec return *cache; } +// Returns true if klass is admissible to the propagation: non-null and non-erroneous. +// For an array type, we also check if the component type is admissible. +static bool IsAdmissible(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_) { + return klass != nullptr && !klass->IsErroneous() && + (!klass->IsArrayClass() || IsAdmissible(klass->GetComponentType())); +} + ReferenceTypeInfo::TypeHandle ReferenceTypePropagation::HandleCache::GetObjectClassHandle() { return GetRootHandle(handles_, ClassLinker::kJavaLangObject, &object_class_handle_); } @@ -453,15 +460,10 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst } instr->SetReferenceTypeInfo( ReferenceTypeInfo::Create(handle_cache_->GetStringClassHandle(), /* is_exact */ true)); - } else if (klass != nullptr) { - if (klass->IsErroneous()) { - // Set inexact object type for erroneous types. 
- instr->SetReferenceTypeInfo(instr->GetBlock()->GetGraph()->GetInexactObjectRti()); - } else { - ReferenceTypeInfo::TypeHandle handle = handle_cache_->NewHandle(klass); - is_exact = is_exact || handle->CannotBeAssignedFromOtherTypes(); - instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(handle, is_exact)); - } + } else if (IsAdmissible(klass)) { + ReferenceTypeInfo::TypeHandle handle = handle_cache_->NewHandle(klass); + is_exact = is_exact || handle->CannotBeAssignedFromOtherTypes(); + instr->SetReferenceTypeInfo(ReferenceTypeInfo::Create(handle, is_exact)); } else { instr->SetReferenceTypeInfo(instr->GetBlock()->GetGraph()->GetInexactObjectRti()); } @@ -563,7 +565,7 @@ void ReferenceTypePropagation::RTPVisitor::VisitLoadClass(HLoadClass* instr) { instr->GetDexFile(), instr->GetTypeIndex(), hint_dex_cache_); - if (resolved_class != nullptr && !resolved_class->IsErroneous()) { + if (IsAdmissible(resolved_class)) { instr->SetLoadedClassRTI(ReferenceTypeInfo::Create( handle_cache_->NewHandle(resolved_class), /* is_exact */ true)); } @@ -664,12 +666,6 @@ void ReferenceTypePropagation::VisitPhi(HPhi* phi) { } if (phi->GetBlock()->IsLoopHeader()) { - if (!is_first_run_ && graph_->IsCompilingOsr()) { - // Don't update the type of a loop phi when compiling OSR: we may have done - // speculative optimizations dominating that phi, that do not hold at the - // point the interpreter jumps to that loop header. - return; - } // Set the initial type for the phi. Use the non back edge input for reaching // a fixed point faster. HInstruction* first_input = phi->InputAt(0); @@ -742,7 +738,7 @@ void ReferenceTypePropagation::UpdateArrayGet(HArrayGet* instr, HandleCache* han } Handle<mirror::Class> handle = parent_rti.GetTypeHandle(); - if (handle->IsObjectArrayClass() && !handle->GetComponentType()->IsErroneous()) { + if (handle->IsObjectArrayClass() && IsAdmissible(handle->GetComponentType())) { ReferenceTypeInfo::TypeHandle component_handle = handle_cache->NewHandle(handle->GetComponentType()); bool is_exact = component_handle->CannotBeAssignedFromOtherTypes(); diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc index 7a1bb316e4..08bd35f14a 100644 --- a/compiler/optimizing/sharpening.cc +++ b/compiler/optimizing/sharpening.cc @@ -99,7 +99,7 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { if (direct_method != 0u) { // Should we use a direct pointer to the method? // Note: For JIT, kDirectAddressWithFixup doesn't make sense at all and while // kDirectAddress would be fine for image methods, we don't support it at the moment. - DCHECK(!Runtime::Current()->UseJit()); + DCHECK(!Runtime::Current()->UseJitCompilation()); if (direct_method != static_cast<uintptr_t>(-1)) { // Is the method pointer known now? method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDirectAddress; method_load_data = direct_method; @@ -109,7 +109,7 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { } else { // Use dex cache. DCHECK_EQ(target_method.dex_file, &graph_->GetDexFile()); if (use_pc_relative_instructions) { // Can we use PC-relative access to the dex cache arrays? 
- DCHECK(!Runtime::Current()->UseJit()); + DCHECK(!Runtime::Current()->UseJitCompilation()); method_load_kind = HInvokeStaticOrDirect::MethodLoadKind::kDexCachePcRelative; DexCacheArraysLayout layout(GetInstructionSetPointerSize(codegen_->GetInstructionSet()), &graph_->GetDexFile()); @@ -121,7 +121,7 @@ void HSharpening::ProcessInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) { if (direct_code != 0u) { // Should we use a direct pointer to the code? // Note: For JIT, kCallPCRelative and kCallDirectWithFixup don't make sense at all and // while kCallDirect would be fine for image methods, we don't support it at the moment. - DCHECK(!Runtime::Current()->UseJit()); + DCHECK(!Runtime::Current()->UseJitCompilation()); if (direct_code != static_cast<uintptr_t>(-1)) { // Is the code pointer known now? code_ptr_location = HInvokeStaticOrDirect::CodePtrLocation::kCallDirect; direct_code_ptr = direct_code; @@ -174,7 +174,7 @@ void HSharpening::ProcessLoadString(HLoadString* load_string) { if (compiler_driver_->IsBootImage()) { // Compiling boot image. Resolve the string and allocate it if needed. - DCHECK(!runtime->UseJit()); + DCHECK(!runtime->UseJitCompilation()); mirror::String* string = class_linker->ResolveString(dex_file, string_index, dex_cache); CHECK(string != nullptr); if (!compiler_driver_->GetSupportBootImageFixup()) { @@ -187,7 +187,7 @@ ? HLoadString::LoadKind::kBootImageLinkTimePcRelative : HLoadString::LoadKind::kBootImageLinkTimeAddress; } - } else if (runtime->UseJit()) { + } else if (runtime->UseJitCompilation()) { // TODO: Make sure we don't set the "compile PIC" flag for JIT as that's bogus. // DCHECK(!codegen_->GetCompilerOptions().GetCompilePic()); mirror::String* string = dex_cache->GetResolvedString(string_index); diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h index 40dab74a23..1141fd1c76 100644 --- a/compiler/optimizing/ssa_liveness_analysis.h +++ b/compiler/optimizing/ssa_liveness_analysis.h @@ -1003,6 +1003,15 @@ class LiveInterval : public ArenaObject<kArenaAllocSsaLiveness> { void AddBackEdgeUses(const HBasicBlock& block_at_use) { DCHECK(block_at_use.IsInLoop()); + if (block_at_use.GetGraph()->HasIrreducibleLoops()) { + // Linear order may not be well formed when irreducible loops are present, + // i.e. loop blocks may not be adjacent and a back edge may not be last, + // which violates assumptions made in this method. + return; + } + + DCHECK(IsLinearOrderWellFormed(*block_at_use.GetGraph())); + // Add synthesized uses at the back edge of loops to help the register allocator. // Note that this method is called in decreasing liveness order, to facilitate adding // uses at the head of the `first_use_` linked list. Because below @@ -1027,30 +1036,12 @@ if ((first_use_ != nullptr) && (first_use_->GetPosition() <= back_edge_use_position)) { // There was a use already seen in this loop. Therefore the previous call to `AddUse` // already inserted the backedge use. We can stop going outward. - if (kIsDebugBuild) { - if (!HasSynthesizeUseAt(back_edge_use_position)) { - // There exists a use prior to `back_edge_use_position` but there is - // no synthesized use at the back edge. This can happen in the presence - // of irreducible loops, when blocks of the loop are not adjacent in - // linear order, i.e.
when there is an out-of-loop block between - `block_at_use` and `back_edge_position` that uses this interval. - DCHECK(block_at_use.GetGraph()->HasIrreducibleLoops()); - DCHECK(!IsLinearOrderWellFormed(*block_at_use.GetGraph())); - } - } + DCHECK(HasSynthesizeUseAt(back_edge_use_position)); break; } - if (last_in_new_list != nullptr && - back_edge_use_position <= last_in_new_list->GetPosition()) { - // Loops are not properly nested in the linear order, i.e. the back edge - // of an outer loop precedes blocks of an inner loop. This can happen - // in the presence of irreducible loops. - DCHECK(block_at_use.GetGraph()->HasIrreducibleLoops()); - DCHECK(!IsLinearOrderWellFormed(*block_at_use.GetGraph())); - // We must bail out, otherwise we would generate an unsorted use list. - break; - } + DCHECK(last_in_new_list == nullptr || + back_edge_use_position > last_in_new_list->GetPosition()); UsePosition* new_use = new (allocator_) UsePosition( /* user */ nullptr, diff --git a/dex2oat/dex2oat.cc b/dex2oat/dex2oat.cc index 370583e3ba..be38336f03 100644 --- a/dex2oat/dex2oat.cc +++ b/dex2oat/dex2oat.cc @@ -1526,6 +1526,7 @@ class Dex2Oat FINAL { instruction_set_, instruction_set_features_.get(), IsBootImage(), + IsAppImage(), image_classes_.release(), compiled_classes_.release(), /* compiled_methods */ nullptr, diff --git a/oatdump/oatdump.cc b/oatdump/oatdump.cc index 3c6a05d97b..d2ab699599 100644 --- a/oatdump/oatdump.cc +++ b/oatdump/oatdump.cc @@ -59,6 +59,7 @@ #include "stack_map.h" #include "ScopedLocalRef.h" #include "thread_list.h" +#include "type_lookup_table.h" #include "verifier/method_verifier.h" #include "well_known_classes.h" @@ -573,8 +574,15 @@ class OatDumper { os << StringPrintf("location: %s\n", oat_dex_file.GetDexFileLocation().c_str()); os << StringPrintf("checksum: 0x%08x\n", oat_dex_file.GetDexFileLocationChecksum()); - // Create the verifier early. + // Print embedded dex file data range. + const uint8_t* const oat_file_begin = oat_dex_file.GetOatFile()->Begin(); + const uint8_t* const dex_file_pointer = oat_dex_file.GetDexFilePointer(); + uint32_t dex_offset = dchecked_integral_cast<uint32_t>(dex_file_pointer - oat_file_begin); + os << StringPrintf("dex-file: 0x%08x..0x%08x\n", + dex_offset, + dchecked_integral_cast<uint32_t>(dex_offset + oat_dex_file.FileSize() - 1)); + // Create the dex file early. A lot of print-out things depend on it. std::string error_msg; const DexFile* const dex_file = OpenDexFile(&oat_dex_file, &error_msg); if (dex_file == nullptr) { @@ -583,6 +591,16 @@ class OatDumper { return false; } + // Print lookup table, if it exists. + if (oat_dex_file.GetLookupTableData() != nullptr) { + uint32_t table_offset = dchecked_integral_cast<uint32_t>( + oat_dex_file.GetLookupTableData() - oat_file_begin); + uint32_t table_size = TypeLookupTable::RawDataLength(*dex_file); + os << StringPrintf("type-table: 0x%08x..0x%08x\n", + table_offset, + table_offset + table_size - 1); + } + VariableIndentationOutputStream vios(&os); ScopedIndentation indent1(&vios); for (size_t class_def_index = 0; @@ -1416,11 +1434,10 @@ class ImageDumper { indent_os << "\n"; // TODO: Dump fields. // Dump methods after. - const auto& methods_section = image_header_.GetMethodsSection(); DumpArtMethodVisitor visitor(this); - methods_section.VisitPackedArtMethods(&visitor, - image_space_.Begin(), - image_header_.GetPointerSize()); + image_header_.VisitPackedArtMethods(&visitor, + image_space_.Begin(), + image_header_.GetPointerSize()); // Dump the large objects separately.
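The new "dex-file:" and "type-table:" lines above are plain offset arithmetic with inclusive end offsets; a self-contained sketch of the same formatting (PrintEmbeddedRange is a hypothetical helper, not oatdump code):

#include <cstdint>
#include <cstdio>

// Report a blob embedded in a larger file as "tag: 0xbegin..0xend" relative
// to the file base; the end offset is inclusive, hence the "- 1".
void PrintEmbeddedRange(const char* tag,
                        const uint8_t* file_begin,
                        const uint8_t* blob_begin,
                        uint32_t blob_size) {
  uint32_t begin = static_cast<uint32_t>(blob_begin - file_begin);
  std::printf("%s: 0x%08x..0x%08x\n", tag,
              static_cast<unsigned>(begin),
              static_cast<unsigned>(begin + blob_size - 1));
}

int main() {
  uint8_t oat_file[4096] = {};
  PrintEmbeddedRange("dex-file", oat_file, oat_file + 0x800, 0x200);
  // prints: dex-file: 0x00000800..0x000009ff
  return 0;
}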
heap->GetLargeObjectsSpace()->GetLiveBitmap()->Walk(ImageDumper::Callback, this); indent_os << "\n"; @@ -1779,6 +1796,7 @@ class ImageDumper { DCHECK(method != nullptr); const void* quick_oat_code_begin = GetQuickOatCodeBegin(method); const void* quick_oat_code_end = GetQuickOatCodeEnd(method); + const size_t pointer_size = image_header_.GetPointerSize(); OatQuickMethodHeader* method_header = reinterpret_cast<OatQuickMethodHeader*>( reinterpret_cast<uintptr_t>(quick_oat_code_begin) - sizeof(OatQuickMethodHeader)); if (method->IsNative()) { @@ -1792,13 +1810,16 @@ class ImageDumper { image_header_.GetPointerSize())) { indent_os << StringPrintf("OAT CODE: %p\n", quick_oat_code_begin); } - } else if (method->IsAbstract() || - method->IsCalleeSaveMethod() || - method->IsResolutionMethod() || - (method == Runtime::Current()->GetImtConflictMethod()) || - method->IsImtUnimplementedMethod() || - method->IsClassInitializer()) { + } else if (method->IsAbstract() || method->IsClassInitializer()) { // Don't print information for these. + } else if (method->IsRuntimeMethod()) { + ImtConflictTable* table = method->GetImtConflictTable(image_header_.GetPointerSize()); + if (table != nullptr) { + indent_os << "IMT conflict table " << table << " method: "; + for (size_t i = 0, count = table->NumEntries(pointer_size); i < count; ++i) { + indent_os << PrettyMethod(table->GetImplementationMethod(i, pointer_size)) << " "; + } + } } else { const DexFile::CodeItem* code_item = method->GetCodeItem(); size_t dex_instruction_bytes = code_item->insns_size_in_code_units_ * 2; diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc index 93e40afea8..0a7ffda3b4 100644 --- a/patchoat/patchoat.cc +++ b/patchoat/patchoat.cc @@ -472,8 +472,7 @@ class PatchOatArtFieldVisitor : public ArtFieldVisitor { void PatchOat::PatchArtFields(const ImageHeader* image_header) { PatchOatArtFieldVisitor visitor(this); - const auto& section = image_header->GetImageSection(ImageHeader::kSectionArtFields); - section.VisitPackedArtFields(&visitor, heap_->Begin()); + image_header->VisitPackedArtFields(&visitor, heap_->Begin()); } class PatchOatArtMethodVisitor : public ArtMethodVisitor { @@ -490,10 +489,20 @@ class PatchOatArtMethodVisitor : public ArtMethodVisitor { }; void PatchOat::PatchArtMethods(const ImageHeader* image_header) { - const auto& section = image_header->GetMethodsSection(); const size_t pointer_size = InstructionSetPointerSize(isa_); PatchOatArtMethodVisitor visitor(this); - section.VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size); + image_header->VisitPackedArtMethods(&visitor, heap_->Begin(), pointer_size); +} + +void PatchOat::PatchImtConflictTables(const ImageHeader* image_header) { + const size_t pointer_size = InstructionSetPointerSize(isa_); + // We can safely walk target image since the conflict tables are independent. + image_header->VisitPackedImtConflictTables( + [this](ArtMethod* method) { + return RelocatedAddressOfPointer(method); + }, + image_->Begin(), + pointer_size); } class FixupRootVisitor : public RootVisitor { @@ -627,6 +636,7 @@ bool PatchOat::PatchImage(bool primary_image) { PatchArtFields(image_header); PatchArtMethods(image_header); + PatchImtConflictTables(image_header); PatchInternedStrings(image_header); PatchClassTable(image_header); // Patch dex file int/long arrays which point to ArtFields. 
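PatchImtConflictTables above hands the image walker a lambda that maps each method pointer to its relocated address. The idea in isolation, as a hedged standalone model (FakeMethod and the vector-of-pairs table are illustrative stand-ins for the real packed layout):

#include <cstdint>
#include <utility>
#include <vector>

struct FakeMethod {};  // stand-in for ArtMethod

// Every pointer in a relocated image moves by the same delta, so patching a
// conflict table is just rewriting each <interface, implementation> pair.
FakeMethod* Relocated(FakeMethod* p, intptr_t delta) {
  return p == nullptr
             ? nullptr
             : reinterpret_cast<FakeMethod*>(reinterpret_cast<intptr_t>(p) + delta);
}

void PatchTable(std::vector<std::pair<FakeMethod*, FakeMethod*>>* table,
                intptr_t delta) {
  for (auto& entry : *table) {
    entry.first = Relocated(entry.first, delta);    // interface method
    entry.second = Relocated(entry.second, delta);  // implementation method
  }
}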
@@ -725,6 +735,7 @@ void PatchOat::FixupMethod(ArtMethod* object, ArtMethod* copy) { RelocatedAddressOfPointer(object->GetDexCacheResolvedTypes(pointer_size)), pointer_size); copy->SetEntryPointFromQuickCompiledCodePtrSize(RelocatedAddressOfPointer( object->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)), pointer_size); + // No special handling for IMT conflict table since all pointers are moved by the same offset. copy->SetEntryPointFromJniPtrSize(RelocatedAddressOfPointer( object->GetEntryPointFromJniPtrSize(pointer_size)), pointer_size); } diff --git a/patchoat/patchoat.h b/patchoat/patchoat.h index 510ff1e5be..3ef837fde9 100644 --- a/patchoat/patchoat.h +++ b/patchoat/patchoat.h @@ -117,6 +117,8 @@ class PatchOat { bool PatchImage(bool primary_image) SHARED_REQUIRES(Locks::mutator_lock_); void PatchArtFields(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_); void PatchArtMethods(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_); + void PatchImtConflictTables(const ImageHeader* image_header) + SHARED_REQUIRES(Locks::mutator_lock_); void PatchInternedStrings(const ImageHeader* image_header) SHARED_REQUIRES(Locks::mutator_lock_); void PatchClassTable(const ImageHeader* image_header) diff --git a/runtime/arch/arm/entrypoints_init_arm.cc b/runtime/arch/arm/entrypoints_init_arm.cc index e358ff879c..f0e9ac5170 100644 --- a/runtime/arch/arm/entrypoints_init_arm.cc +++ b/runtime/arch/arm/entrypoints_init_arm.cc @@ -17,6 +17,7 @@ #include "entrypoints/jni/jni_entrypoints.h" #include "entrypoints/quick/quick_alloc_entrypoints.h" #include "entrypoints/quick/quick_default_externs.h" +#include "entrypoints/quick/quick_default_init_entrypoints.h" #include "entrypoints/quick/quick_entrypoints.h" #include "entrypoints/entrypoint_utils.h" #include "entrypoints/math_entrypoints.h" @@ -47,67 +48,12 @@ extern "C" int __aeabi_idivmod(int32_t, int32_t); // [DIV|REM]_INT[_2ADDR|_LIT8 extern "C" int64_t __aeabi_ldivmod(int64_t, int64_t); void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { - // JNI - jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; - - // Alloc - ResetQuickAllocEntryPoints(qpoints); + DefaultInitEntryPoints(jpoints, qpoints); // Cast qpoints->pInstanceofNonTrivial = artIsAssignableFromCode; qpoints->pCheckCast = art_quick_check_cast; - // DexCache - qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; - qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access; - qpoints->pInitializeType = art_quick_initialize_type; - qpoints->pResolveString = art_quick_resolve_string; - - // Field - qpoints->pSet8Instance = art_quick_set8_instance; - qpoints->pSet8Static = art_quick_set8_static; - qpoints->pSet16Instance = art_quick_set16_instance; - qpoints->pSet16Static = art_quick_set16_static; - qpoints->pSet32Instance = art_quick_set32_instance; - qpoints->pSet32Static = art_quick_set32_static; - qpoints->pSet64Instance = art_quick_set64_instance; - qpoints->pSet64Static = art_quick_set64_static; - qpoints->pSetObjInstance = art_quick_set_obj_instance; - qpoints->pSetObjStatic = art_quick_set_obj_static; - qpoints->pGetByteInstance = art_quick_get_byte_instance; - qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; - qpoints->pGetShortInstance = art_quick_get_short_instance; - qpoints->pGetCharInstance = art_quick_get_char_instance; - qpoints->pGet32Instance = art_quick_get32_instance; - qpoints->pGet64Instance = art_quick_get64_instance; - qpoints->pGetObjInstance = 
art_quick_get_obj_instance; - qpoints->pGetByteStatic = art_quick_get_byte_static; - qpoints->pGetBooleanStatic = art_quick_get_boolean_static; - qpoints->pGetShortStatic = art_quick_get_short_static; - qpoints->pGetCharStatic = art_quick_get_char_static; - qpoints->pGet32Static = art_quick_get32_static; - qpoints->pGet64Static = art_quick_get64_static; - qpoints->pGetObjStatic = art_quick_get_obj_static; - - // Array - qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check; - qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check; - qpoints->pAputObject = art_quick_aput_obj; - qpoints->pHandleFillArrayData = art_quick_handle_fill_data; - - // JNI - qpoints->pJniMethodStart = JniMethodStart; - qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; - qpoints->pJniMethodEnd = JniMethodEnd; - qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; - qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; - qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline; - - // Locks - qpoints->pLockObject = art_quick_lock_object; - qpoints->pUnlockObject = art_quick_unlock_object; - // Math qpoints->pIdivmod = __aeabi_idivmod; qpoints->pLdiv = __aeabi_ldivmod; @@ -154,35 +100,6 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { qpoints->pStringCompareTo = art_quick_string_compareto; qpoints->pMemcpy = memcpy; - // Invocation - qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline; - qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline; - qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge; - qpoints->pInvokeDirectTrampolineWithAccessCheck = - art_quick_invoke_direct_trampoline_with_access_check; - qpoints->pInvokeInterfaceTrampolineWithAccessCheck = - art_quick_invoke_interface_trampoline_with_access_check; - qpoints->pInvokeStaticTrampolineWithAccessCheck = - art_quick_invoke_static_trampoline_with_access_check; - qpoints->pInvokeSuperTrampolineWithAccessCheck = - art_quick_invoke_super_trampoline_with_access_check; - qpoints->pInvokeVirtualTrampolineWithAccessCheck = - art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - qpoints->pTestSuspend = art_quick_test_suspend; - - // Throws - qpoints->pDeliverException = art_quick_deliver_exception; - qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; - qpoints->pThrowDivZero = art_quick_throw_div_zero; - qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; - qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; - qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; - - // Deoptimization from compiled code. - qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code; - // Read barrier. 
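Each architecture now starts from one shared helper and only overrides what is architecture-specific. The new header is not part of this excerpt, so the following is only an inferred sketch of its likely shape, reconstructed from the assignments deleted above and from the lock-entrypoint selection visible in the MIPS hunk further down:

// Hypothetical reconstruction of
// entrypoints/quick/quick_default_init_entrypoints.h (not shown in this diff).
static void DefaultInitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) {
  // JNI
  jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub;

  // Alloc
  ResetQuickAllocEntryPoints(qpoints);

  // DexCache, Field, Array, JNI method and Invocation entrypoints: the same
  // art_quick_* / JniMethod* assignments removed from each file above.

  // Locks: route through the out-of-line stubs when lock events must be
  // observable (see the MIPS hunk below and -verbose:systrace-locks).
  if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) {
    qpoints->pLockObject = art_quick_lock_object_no_inline;
    qpoints->pUnlockObject = art_quick_unlock_object_no_inline;
  } else {
    qpoints->pLockObject = art_quick_lock_object;
    qpoints->pUnlockObject = art_quick_unlock_object;
  }

  // Thread, Throws and Deoptimization entrypoints, likewise as above.
  qpoints->pTestSuspend = art_quick_test_suspend;
  qpoints->pDeliverException = art_quick_deliver_exception;
  qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code;
}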
qpoints->pReadBarrierJni = ReadBarrierJni; qpoints->pReadBarrierMark = artReadBarrierMark; diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index e6ff0aa131..321b9d217a 100644 --- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -544,6 +544,15 @@ ENTRY art_quick_lock_object DELIVER_PENDING_EXCEPTION END art_quick_lock_object +ENTRY art_quick_lock_object_no_inline + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 @ save callee saves in case we block + mov r1, r9 @ pass Thread::Current + bl artLockObjectFromCode @ (Object* obj, Thread*) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME + RETURN_IF_RESULT_IS_ZERO + DELIVER_PENDING_EXCEPTION +END art_quick_lock_object_no_inline + /* * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. * r0 holds the possibly null object to lock. @@ -601,6 +610,16 @@ ENTRY art_quick_unlock_object DELIVER_PENDING_EXCEPTION END art_quick_unlock_object +ENTRY art_quick_unlock_object_no_inline + @ save callee saves in case exception allocation triggers GC + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME r1, r2 + mov r1, r9 @ pass Thread::Current + bl artUnlockObjectFromCode @ (Object* obj, Thread*) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME + RETURN_IF_RESULT_IS_ZERO + DELIVER_PENDING_EXCEPTION +END art_quick_unlock_object_no_inline + /* * Entry from managed code that calls artIsAssignableFromCode and on failure calls * artThrowClassCastException. diff --git a/runtime/arch/arm64/entrypoints_init_arm64.cc b/runtime/arch/arm64/entrypoints_init_arm64.cc index f271596f82..bf0f6470d1 100644 --- a/runtime/arch/arm64/entrypoints_init_arm64.cc +++ b/runtime/arch/arm64/entrypoints_init_arm64.cc @@ -17,6 +17,7 @@ #include "entrypoints/jni/jni_entrypoints.h" #include "entrypoints/quick/quick_alloc_entrypoints.h" #include "entrypoints/quick/quick_default_externs.h" +#include "entrypoints/quick/quick_default_init_entrypoints.h" #include "entrypoints/quick/quick_entrypoints.h" #include "entrypoints/entrypoint_utils.h" #include "entrypoints/math_entrypoints.h" @@ -30,67 +31,12 @@ extern "C" uint32_t artIsAssignableFromCode(const mirror::Class* klass, const mirror::Class* ref_class); void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { - // JNI - jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; - - // Alloc - ResetQuickAllocEntryPoints(qpoints); + DefaultInitEntryPoints(jpoints, qpoints); // Cast qpoints->pInstanceofNonTrivial = artIsAssignableFromCode; qpoints->pCheckCast = art_quick_check_cast; - // DexCache - qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; - qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access; - qpoints->pInitializeType = art_quick_initialize_type; - qpoints->pResolveString = art_quick_resolve_string; - - // Field - qpoints->pSet8Instance = art_quick_set8_instance; - qpoints->pSet8Static = art_quick_set8_static; - qpoints->pSet16Instance = art_quick_set16_instance; - qpoints->pSet16Static = art_quick_set16_static; - qpoints->pSet32Instance = art_quick_set32_instance; - qpoints->pSet32Static = art_quick_set32_static; - qpoints->pSet64Instance = art_quick_set64_instance; - qpoints->pSet64Static = art_quick_set64_static; - qpoints->pSetObjInstance = art_quick_set_obj_instance; - qpoints->pSetObjStatic = art_quick_set_obj_static; - qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; - qpoints->pGetByteInstance = art_quick_get_byte_instance; - qpoints->pGetCharInstance = 
art_quick_get_char_instance; - qpoints->pGetShortInstance = art_quick_get_short_instance; - qpoints->pGet32Instance = art_quick_get32_instance; - qpoints->pGet64Instance = art_quick_get64_instance; - qpoints->pGetObjInstance = art_quick_get_obj_instance; - qpoints->pGetBooleanStatic = art_quick_get_boolean_static; - qpoints->pGetByteStatic = art_quick_get_byte_static; - qpoints->pGetCharStatic = art_quick_get_char_static; - qpoints->pGetShortStatic = art_quick_get_short_static; - qpoints->pGet32Static = art_quick_get32_static; - qpoints->pGet64Static = art_quick_get64_static; - qpoints->pGetObjStatic = art_quick_get_obj_static; - - // Array - qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check; - qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check; - qpoints->pAputObject = art_quick_aput_obj; - qpoints->pHandleFillArrayData = art_quick_handle_fill_data; - - // JNI - qpoints->pJniMethodStart = JniMethodStart; - qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; - qpoints->pJniMethodEnd = JniMethodEnd; - qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; - qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; - qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline; - - // Locks - qpoints->pLockObject = art_quick_lock_object; - qpoints->pUnlockObject = art_quick_unlock_object; - // Math // TODO null entrypoints not needed for ARM64 - generate inline. qpoints->pCmpgDouble = nullptr; @@ -138,35 +84,6 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { qpoints->pStringCompareTo = nullptr; qpoints->pMemcpy = memcpy; - // Invocation - qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline; - qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline; - qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge; - qpoints->pInvokeDirectTrampolineWithAccessCheck = - art_quick_invoke_direct_trampoline_with_access_check; - qpoints->pInvokeInterfaceTrampolineWithAccessCheck = - art_quick_invoke_interface_trampoline_with_access_check; - qpoints->pInvokeStaticTrampolineWithAccessCheck = - art_quick_invoke_static_trampoline_with_access_check; - qpoints->pInvokeSuperTrampolineWithAccessCheck = - art_quick_invoke_super_trampoline_with_access_check; - qpoints->pInvokeVirtualTrampolineWithAccessCheck = - art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - qpoints->pTestSuspend = art_quick_test_suspend; - - // Throws - qpoints->pDeliverException = art_quick_deliver_exception; - qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; - qpoints->pThrowDivZero = art_quick_throw_div_zero; - qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; - qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; - qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; - - // Deoptimization from compiled code. - qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code; - // Read barrier. 
qpoints->pReadBarrierJni = ReadBarrierJni; qpoints->pReadBarrierMark = artReadBarrierMark; diff --git a/runtime/arch/arm64/instruction_set_features_arm64.cc b/runtime/arch/arm64/instruction_set_features_arm64.cc index 613bb5c765..cad13b29d9 100644 --- a/runtime/arch/arm64/instruction_set_features_arm64.cc +++ b/runtime/arch/arm64/instruction_set_features_arm64.cc @@ -39,7 +39,7 @@ const Arm64InstructionSetFeatures* Arm64InstructionSetFeatures::FromVariant( if (!needs_a53_835769_fix) { // Check to see if this is an expected variant. static const char* arm64_known_variants[] = { - "denver64", "kryo" + "denver64", "kryo", "exynos-m1" }; if (!FindVariantInArray(arm64_known_variants, arraysize(arm64_known_variants), variant)) { std::ostringstream os; diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S index 27141fdf28..1fba09bae3 100644 --- a/runtime/arch/arm64/quick_entrypoints_arm64.S +++ b/runtime/arch/arm64/quick_entrypoints_arm64.S @@ -1113,6 +1113,14 @@ ENTRY art_quick_lock_object RETURN_IF_W0_IS_ZERO_OR_DELIVER END art_quick_lock_object +ENTRY art_quick_lock_object_no_inline + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case we block + mov x1, xSELF // pass Thread::Current + bl artLockObjectFromCode // (Object* obj, Thread*) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME + RETURN_IF_W0_IS_ZERO_OR_DELIVER +END art_quick_lock_object_no_inline + /* * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. * x0 holds the possibly null object to unlock. @@ -1171,6 +1179,14 @@ ENTRY art_quick_unlock_object RETURN_IF_W0_IS_ZERO_OR_DELIVER END art_quick_unlock_object +ENTRY art_quick_unlock_object_no_inline + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME // save callee saves in case exception allocation triggers GC + mov x1, xSELF // pass Thread::Current + bl artUnlockObjectFromCode // (Object* obj, Thread*) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME + RETURN_IF_W0_IS_ZERO_OR_DELIVER +END art_quick_unlock_object_no_inline + /* * Entry from managed code that calls artIsAssignableFromCode and on failure calls * artThrowClassCastException. diff --git a/runtime/arch/mips/entrypoints_init_mips.cc b/runtime/arch/mips/entrypoints_init_mips.cc index 51eb77f409..45e33a8500 100644 --- a/runtime/arch/mips/entrypoints_init_mips.cc +++ b/runtime/arch/mips/entrypoints_init_mips.cc @@ -59,6 +59,9 @@ extern "C" int64_t __divdi3(int64_t, int64_t); extern "C" int64_t __moddi3(int64_t, int64_t); void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { + // Note: MIPS has asserts checking for the type of entrypoint. Don't move it + // to DefaultInitEntryPoints().
+ // JNI jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; @@ -167,9 +170,14 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { "Non-direct C stub marked direct."); // Locks - qpoints->pLockObject = art_quick_lock_object; + if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) { + qpoints->pLockObject = art_quick_lock_object_no_inline; + qpoints->pUnlockObject = art_quick_unlock_object_no_inline; + } else { + qpoints->pLockObject = art_quick_lock_object; + qpoints->pUnlockObject = art_quick_unlock_object; + } static_assert(!IsDirectEntrypoint(kQuickLockObject), "Non-direct C stub marked direct."); - qpoints->pUnlockObject = art_quick_unlock_object; static_assert(!IsDirectEntrypoint(kQuickUnlockObject), "Non-direct C stub marked direct."); // Math diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index 8939a488e9..3ee26afc4f 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -906,6 +906,16 @@ ENTRY art_quick_lock_object RETURN_IF_ZERO END art_quick_lock_object +ENTRY art_quick_lock_object_no_inline + beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set + nop + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block + la $t9, artLockObjectFromCode + jalr $t9 # (Object* obj, Thread*) + move $a1, rSELF # pass Thread::Current + RETURN_IF_ZERO +END art_quick_lock_object_no_inline + /* * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. */ @@ -920,6 +930,16 @@ ENTRY art_quick_unlock_object RETURN_IF_ZERO END art_quick_unlock_object +ENTRY art_quick_unlock_object_no_inline + beqz $a0, .Lart_quick_throw_null_pointer_exception_gp_set + nop + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC + la $t9, artUnlockObjectFromCode + jalr $t9 # (Object* obj, Thread*) + move $a1, rSELF # pass Thread::Current + RETURN_IF_ZERO +END art_quick_unlock_object_no_inline + /* * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure. 
*/ diff --git a/runtime/arch/mips64/entrypoints_init_mips64.cc b/runtime/arch/mips64/entrypoints_init_mips64.cc index 4bdb38e51b..030c12707e 100644 --- a/runtime/arch/mips64/entrypoints_init_mips64.cc +++ b/runtime/arch/mips64/entrypoints_init_mips64.cc @@ -18,6 +18,7 @@ #include "entrypoints/jni/jni_entrypoints.h" #include "entrypoints/quick/quick_alloc_entrypoints.h" #include "entrypoints/quick/quick_default_externs.h" +#include "entrypoints/quick/quick_default_init_entrypoints.h" #include "entrypoints/quick/quick_entrypoints.h" #include "entrypoints/entrypoint_utils.h" #include "entrypoints/math_entrypoints.h" @@ -57,67 +58,12 @@ extern "C" int64_t __divdi3(int64_t, int64_t); extern "C" int64_t __moddi3(int64_t, int64_t); void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { - // JNI - jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; - - // Alloc - ResetQuickAllocEntryPoints(qpoints); + DefaultInitEntryPoints(jpoints, qpoints); // Cast qpoints->pInstanceofNonTrivial = artIsAssignableFromCode; qpoints->pCheckCast = art_quick_check_cast; - // DexCache - qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; - qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access; - qpoints->pInitializeType = art_quick_initialize_type; - qpoints->pResolveString = art_quick_resolve_string; - - // Field - qpoints->pSet8Instance = art_quick_set8_instance; - qpoints->pSet8Static = art_quick_set8_static; - qpoints->pSet16Instance = art_quick_set16_instance; - qpoints->pSet16Static = art_quick_set16_static; - qpoints->pSet32Instance = art_quick_set32_instance; - qpoints->pSet32Static = art_quick_set32_static; - qpoints->pSet64Instance = art_quick_set64_instance; - qpoints->pSet64Static = art_quick_set64_static; - qpoints->pSetObjInstance = art_quick_set_obj_instance; - qpoints->pSetObjStatic = art_quick_set_obj_static; - qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; - qpoints->pGetByteInstance = art_quick_get_byte_instance; - qpoints->pGetCharInstance = art_quick_get_char_instance; - qpoints->pGetShortInstance = art_quick_get_short_instance; - qpoints->pGet32Instance = art_quick_get32_instance; - qpoints->pGet64Instance = art_quick_get64_instance; - qpoints->pGetObjInstance = art_quick_get_obj_instance; - qpoints->pGetBooleanStatic = art_quick_get_boolean_static; - qpoints->pGetByteStatic = art_quick_get_byte_static; - qpoints->pGetCharStatic = art_quick_get_char_static; - qpoints->pGetShortStatic = art_quick_get_short_static; - qpoints->pGet32Static = art_quick_get32_static; - qpoints->pGet64Static = art_quick_get64_static; - qpoints->pGetObjStatic = art_quick_get_obj_static; - - // Array - qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check; - qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check; - qpoints->pAputObject = art_quick_aput_obj; - qpoints->pHandleFillArrayData = art_quick_handle_fill_data; - - // JNI - qpoints->pJniMethodStart = JniMethodStart; - qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; - qpoints->pJniMethodEnd = JniMethodEnd; - qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; - qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; - qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline; - - // Locks - qpoints->pLockObject = art_quick_lock_object; - qpoints->pUnlockObject = art_quick_unlock_object; 
- // Math qpoints->pCmpgDouble = CmpgDouble; qpoints->pCmpgFloat = CmpgFloat; @@ -144,35 +90,6 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { qpoints->pStringCompareTo = art_quick_string_compareto; qpoints->pMemcpy = memcpy; - // Invocation - qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline; - qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline; - qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge; - qpoints->pInvokeDirectTrampolineWithAccessCheck = - art_quick_invoke_direct_trampoline_with_access_check; - qpoints->pInvokeInterfaceTrampolineWithAccessCheck = - art_quick_invoke_interface_trampoline_with_access_check; - qpoints->pInvokeStaticTrampolineWithAccessCheck = - art_quick_invoke_static_trampoline_with_access_check; - qpoints->pInvokeSuperTrampolineWithAccessCheck = - art_quick_invoke_super_trampoline_with_access_check; - qpoints->pInvokeVirtualTrampolineWithAccessCheck = - art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - qpoints->pTestSuspend = art_quick_test_suspend; - - // Throws - qpoints->pDeliverException = art_quick_deliver_exception; - qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; - qpoints->pThrowDivZero = art_quick_throw_div_zero; - qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; - qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; - qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; - - // Deoptimization from compiled code. - qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code; - // TODO - use lld/scd instructions for Mips64 // Atomic 64-bit load/store qpoints->pA64Load = QuasiAtomic::Read64; diff --git a/runtime/arch/mips64/quick_entrypoints_mips64.S b/runtime/arch/mips64/quick_entrypoints_mips64.S index 5d0c94c637..8f1a35a693 100644 --- a/runtime/arch/mips64/quick_entrypoints_mips64.S +++ b/runtime/arch/mips64/quick_entrypoints_mips64.S @@ -971,6 +971,15 @@ ENTRY art_quick_lock_object RETURN_IF_ZERO END art_quick_lock_object +ENTRY art_quick_lock_object_no_inline + beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set + nop + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case we block + jal artLockObjectFromCode # (Object* obj, Thread*) + move $a1, rSELF # pass Thread::Current + RETURN_IF_ZERO +END art_quick_lock_object_no_inline + /* * Entry from managed code that calls artUnlockObjectFromCode and delivers exception on failure. */ @@ -984,6 +993,15 @@ ENTRY art_quick_unlock_object RETURN_IF_ZERO END art_quick_unlock_object +ENTRY art_quick_unlock_object_no_inline + beq $a0, $zero, .Lart_quick_throw_null_pointer_exception_gp_set + nop + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME # save callee saves in case exception allocation triggers GC + jal artUnlockObjectFromCode # (Object* obj, Thread*) + move $a1, rSELF # pass Thread::Current + RETURN_IF_ZERO +END art_quick_unlock_object_no_inline + /* * Entry from managed code that calls artCheckCastFromCode and delivers exception on failure. */ diff --git a/runtime/arch/stub_test.cc b/runtime/arch/stub_test.cc index 1fc579bed3..02629e8196 100644 --- a/runtime/arch/stub_test.cc +++ b/runtime/arch/stub_test.cc @@ -2011,14 +2011,14 @@ TEST_F(StubTest, DISABLED_IMT) { // that will create it: the runtime stub expects to be called by compiled code. 
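  // Note on the rewrite below: the table no longer consists of
  // { ArtMethod*, ArtMethod* } Entry structs but of raw slots whose width
  // depends on the image pointer size, so the test cannot fake an empty
  // table with a local pair anymore; it has to ask
  // ClassLinker::CreateImtConflictTable() for one and pass sizeof(void*)
  // explicitly to every table operation.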
LinearAlloc* linear_alloc = Runtime::Current()->GetLinearAlloc(); ArtMethod* conflict_method = Runtime::Current()->CreateImtConflictMethod(linear_alloc); - static ImtConflictTable::Entry empty_entry = { nullptr, nullptr }; - ImtConflictTable* empty_conflict_table = reinterpret_cast<ImtConflictTable*>(&empty_entry); + ImtConflictTable* empty_conflict_table = + Runtime::Current()->GetClassLinker()->CreateImtConflictTable(/*count*/0u, linear_alloc); void* data = linear_alloc->Alloc( self, - ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table)); + ImtConflictTable::ComputeSizeWithOneMoreEntry(empty_conflict_table, sizeof(void*))); ImtConflictTable* new_table = new (data) ImtConflictTable( - empty_conflict_table, inf_contains, contains_amethod); - conflict_method->SetImtConflictTable(new_table); + empty_conflict_table, inf_contains, contains_amethod, sizeof(void*)); + conflict_method->SetImtConflictTable(new_table, sizeof(void*)); size_t result = Invoke3WithReferrerAndHidden(reinterpret_cast<size_t>(conflict_method), diff --git a/runtime/arch/x86/entrypoints_init_x86.cc b/runtime/arch/x86/entrypoints_init_x86.cc index e593f39fd8..15a857146b 100644 --- a/runtime/arch/x86/entrypoints_init_x86.cc +++ b/runtime/arch/x86/entrypoints_init_x86.cc @@ -17,6 +17,7 @@ #include "entrypoints/jni/jni_entrypoints.h" #include "entrypoints/quick/quick_alloc_entrypoints.h" #include "entrypoints/quick/quick_default_externs.h" +#include "entrypoints/quick/quick_default_init_entrypoints.h" #include "entrypoints/quick/quick_entrypoints.h" #include "entrypoints/runtime_asm_entrypoints.h" #include "interpreter/interpreter.h" @@ -33,67 +34,12 @@ extern "C" mirror::Object* art_quick_read_barrier_slow(mirror::Object*, mirror:: extern "C" mirror::Object* art_quick_read_barrier_for_root_slow(GcRoot<mirror::Object>*); void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { - // JNI - jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; - - // Alloc - ResetQuickAllocEntryPoints(qpoints); + DefaultInitEntryPoints(jpoints, qpoints); // Cast qpoints->pInstanceofNonTrivial = art_quick_is_assignable; qpoints->pCheckCast = art_quick_check_cast; - // DexCache - qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; - qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access; - qpoints->pInitializeType = art_quick_initialize_type; - qpoints->pResolveString = art_quick_resolve_string; - - // Field - qpoints->pSet8Instance = art_quick_set8_instance; - qpoints->pSet8Static = art_quick_set8_static; - qpoints->pSet16Instance = art_quick_set16_instance; - qpoints->pSet16Static = art_quick_set16_static; - qpoints->pSet32Instance = art_quick_set32_instance; - qpoints->pSet32Static = art_quick_set32_static; - qpoints->pSet64Instance = art_quick_set64_instance; - qpoints->pSet64Static = art_quick_set64_static; - qpoints->pSetObjInstance = art_quick_set_obj_instance; - qpoints->pSetObjStatic = art_quick_set_obj_static; - qpoints->pGetByteInstance = art_quick_get_byte_instance; - qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; - qpoints->pGetShortInstance = art_quick_get_short_instance; - qpoints->pGetCharInstance = art_quick_get_char_instance; - qpoints->pGet32Instance = art_quick_get32_instance; - qpoints->pGet64Instance = art_quick_get64_instance; - qpoints->pGetObjInstance = art_quick_get_obj_instance; - qpoints->pGetByteStatic = art_quick_get_byte_static; - qpoints->pGetBooleanStatic = art_quick_get_boolean_static; - qpoints->pGetShortStatic = 
art_quick_get_short_static; - qpoints->pGetCharStatic = art_quick_get_char_static; - qpoints->pGet32Static = art_quick_get32_static; - qpoints->pGet64Static = art_quick_get64_static; - qpoints->pGetObjStatic = art_quick_get_obj_static; - - // Array - qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check; - qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check; - qpoints->pAputObject = art_quick_aput_obj; - qpoints->pHandleFillArrayData = art_quick_handle_fill_data; - - // JNI - qpoints->pJniMethodStart = JniMethodStart; - qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; - qpoints->pJniMethodEnd = JniMethodEnd; - qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; - qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; - qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline; - - // Locks - qpoints->pLockObject = art_quick_lock_object; - qpoints->pUnlockObject = art_quick_unlock_object; - // More math. qpoints->pCos = cos; qpoints->pSin = sin; @@ -128,35 +74,6 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { qpoints->pStringCompareTo = art_quick_string_compareto; qpoints->pMemcpy = art_quick_memcpy; - // Invocation - qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline; - qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline; - qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge; - qpoints->pInvokeDirectTrampolineWithAccessCheck = - art_quick_invoke_direct_trampoline_with_access_check; - qpoints->pInvokeInterfaceTrampolineWithAccessCheck = - art_quick_invoke_interface_trampoline_with_access_check; - qpoints->pInvokeStaticTrampolineWithAccessCheck = - art_quick_invoke_static_trampoline_with_access_check; - qpoints->pInvokeSuperTrampolineWithAccessCheck = - art_quick_invoke_super_trampoline_with_access_check; - qpoints->pInvokeVirtualTrampolineWithAccessCheck = - art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - qpoints->pTestSuspend = art_quick_test_suspend; - - // Throws - qpoints->pDeliverException = art_quick_deliver_exception; - qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; - qpoints->pThrowDivZero = art_quick_throw_div_zero; - qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; - qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; - qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; - - // Deoptimize - qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code; - // Read barrier. 
qpoints->pReadBarrierJni = ReadBarrierJni; qpoints->pReadBarrierMark = art_quick_read_barrier_mark; diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index 4f9b3f7878..485da9fe33 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -1075,6 +1075,22 @@ DEFINE_FUNCTION art_quick_lock_object RETURN_IF_EAX_ZERO END_FUNCTION art_quick_lock_object +DEFINE_FUNCTION art_quick_lock_object_no_inline + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC + // Outgoing argument set up + subl LITERAL(8), %esp // alignment padding + CFI_ADJUST_CFA_OFFSET(8) + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + CFI_ADJUST_CFA_OFFSET(4) + PUSH eax // pass object + call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*) + addl LITERAL(16), %esp // pop arguments + CFI_ADJUST_CFA_OFFSET(-16) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO +END_FUNCTION art_quick_lock_object_no_inline + + DEFINE_FUNCTION art_quick_unlock_object testl %eax, %eax // null check object/eax jz .Lslow_unlock @@ -1130,6 +1146,21 @@ DEFINE_FUNCTION art_quick_unlock_object RETURN_IF_EAX_ZERO END_FUNCTION art_quick_unlock_object +DEFINE_FUNCTION art_quick_unlock_object_no_inline + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME ebx, ebx // save ref containing registers for GC + // Outgoing argument set up + subl LITERAL(8), %esp // alignment padding + CFI_ADJUST_CFA_OFFSET(8) + pushl %fs:THREAD_SELF_OFFSET // pass Thread::Current() + CFI_ADJUST_CFA_OFFSET(4) + PUSH eax // pass object + call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*) + addl LITERAL(16), %esp // pop arguments + CFI_ADJUST_CFA_OFFSET(-16) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO +END_FUNCTION art_quick_unlock_object_no_inline + DEFINE_FUNCTION art_quick_is_assignable PUSH eax // alignment padding PUSH ecx // pass arg2 - obj->klass diff --git a/runtime/arch/x86_64/entrypoints_init_x86_64.cc b/runtime/arch/x86_64/entrypoints_init_x86_64.cc index 0a5d14a163..bd6df700d0 100644 --- a/runtime/arch/x86_64/entrypoints_init_x86_64.cc +++ b/runtime/arch/x86_64/entrypoints_init_x86_64.cc @@ -17,6 +17,9 @@ #include "entrypoints/jni/jni_entrypoints.h" #include "entrypoints/quick/quick_alloc_entrypoints.h" #include "entrypoints/quick/quick_default_externs.h" +#if !defined(__APPLE__) +#include "entrypoints/quick/quick_default_init_entrypoints.h" +#endif #include "entrypoints/quick/quick_entrypoints.h" #include "entrypoints/math_entrypoints.h" #include "entrypoints/runtime_asm_entrypoints.h" @@ -38,67 +41,12 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { UNUSED(jpoints, qpoints); UNIMPLEMENTED(FATAL); #else - // JNI - jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; - - // Alloc - ResetQuickAllocEntryPoints(qpoints); + DefaultInitEntryPoints(jpoints, qpoints); // Cast qpoints->pInstanceofNonTrivial = art_quick_assignable_from_code; qpoints->pCheckCast = art_quick_check_cast; - // DexCache - qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; - qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access; - qpoints->pInitializeType = art_quick_initialize_type; - qpoints->pResolveString = art_quick_resolve_string; - - // Field - qpoints->pSet8Instance = art_quick_set8_instance; - qpoints->pSet8Static = art_quick_set8_static; - 
qpoints->pSet16Instance = art_quick_set16_instance; - qpoints->pSet16Static = art_quick_set16_static; - qpoints->pSet32Instance = art_quick_set32_instance; - qpoints->pSet32Static = art_quick_set32_static; - qpoints->pSet64Instance = art_quick_set64_instance; - qpoints->pSet64Static = art_quick_set64_static; - qpoints->pSetObjInstance = art_quick_set_obj_instance; - qpoints->pSetObjStatic = art_quick_set_obj_static; - qpoints->pGetByteInstance = art_quick_get_byte_instance; - qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; - qpoints->pGetShortInstance = art_quick_get_short_instance; - qpoints->pGetCharInstance = art_quick_get_char_instance; - qpoints->pGet32Instance = art_quick_get32_instance; - qpoints->pGet64Instance = art_quick_get64_instance; - qpoints->pGetObjInstance = art_quick_get_obj_instance; - qpoints->pGetByteStatic = art_quick_get_byte_static; - qpoints->pGetBooleanStatic = art_quick_get_boolean_static; - qpoints->pGetShortStatic = art_quick_get_short_static; - qpoints->pGetCharStatic = art_quick_get_char_static; - qpoints->pGet32Static = art_quick_get32_static; - qpoints->pGet64Static = art_quick_get64_static; - qpoints->pGetObjStatic = art_quick_get_obj_static; - - // Array - qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check; - qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check; - qpoints->pAputObject = art_quick_aput_obj; - qpoints->pHandleFillArrayData = art_quick_handle_fill_data; - - // JNI - qpoints->pJniMethodStart = JniMethodStart; - qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; - qpoints->pJniMethodEnd = JniMethodEnd; - qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; - qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; - qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; - qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline; - - // Locks - qpoints->pLockObject = art_quick_lock_object; - qpoints->pUnlockObject = art_quick_unlock_object; - // More math. 
qpoints->pCos = cos; qpoints->pSin = sin; @@ -132,35 +80,6 @@ void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { qpoints->pStringCompareTo = art_quick_string_compareto; qpoints->pMemcpy = art_quick_memcpy; - // Invocation - qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline; - qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline; - qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge; - qpoints->pInvokeDirectTrampolineWithAccessCheck = - art_quick_invoke_direct_trampoline_with_access_check; - qpoints->pInvokeInterfaceTrampolineWithAccessCheck = - art_quick_invoke_interface_trampoline_with_access_check; - qpoints->pInvokeStaticTrampolineWithAccessCheck = - art_quick_invoke_static_trampoline_with_access_check; - qpoints->pInvokeSuperTrampolineWithAccessCheck = - art_quick_invoke_super_trampoline_with_access_check; - qpoints->pInvokeVirtualTrampolineWithAccessCheck = - art_quick_invoke_virtual_trampoline_with_access_check; - - // Thread - qpoints->pTestSuspend = art_quick_test_suspend; - - // Throws - qpoints->pDeliverException = art_quick_deliver_exception; - qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; - qpoints->pThrowDivZero = art_quick_throw_div_zero; - qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; - qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; - qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; - - // Deoptimize - qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code; - // Read barrier. qpoints->pReadBarrierJni = ReadBarrierJni; qpoints->pReadBarrierMark = art_quick_read_barrier_mark; diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S index 26e668e7ae..562ee2d810 100644 --- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S +++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S @@ -994,6 +994,14 @@ DEFINE_FUNCTION art_quick_lock_object RETURN_IF_EAX_ZERO END_FUNCTION art_quick_lock_object +DEFINE_FUNCTION art_quick_lock_object_no_inline + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME + movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current() + call SYMBOL(artLockObjectFromCode) // artLockObjectFromCode(object, Thread*) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO +END_FUNCTION art_quick_lock_object_no_inline + DEFINE_FUNCTION art_quick_unlock_object testl %edi, %edi // null check object/edi jz .Lslow_unlock @@ -1037,6 +1045,14 @@ DEFINE_FUNCTION art_quick_unlock_object RETURN_IF_EAX_ZERO END_FUNCTION art_quick_unlock_object +DEFINE_FUNCTION art_quick_unlock_object_no_inline + SETUP_REFS_ONLY_CALLEE_SAVE_FRAME + movq %gs:THREAD_SELF_OFFSET, %rsi // pass Thread::Current() + call SYMBOL(artUnlockObjectFromCode) // artUnlockObjectFromCode(object, Thread*) + RESTORE_REFS_ONLY_CALLEE_SAVE_FRAME // restore frame up to return address + RETURN_IF_EAX_ZERO +END_FUNCTION art_quick_unlock_object_no_inline + DEFINE_FUNCTION art_quick_check_cast PUSH rdi // Save args for exc PUSH rsi diff --git a/runtime/art_method.cc b/runtime/art_method.cc index 34d19d151b..06156f5cf8 100644 --- a/runtime/art_method.cc +++ b/runtime/art_method.cc @@ -276,7 +276,7 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* // Ensure that we won't be accidentally calling quick compiled code when -Xint. 
if (kIsDebugBuild && runtime->GetInstrumentation()->IsForcedInterpretOnly()) { - CHECK(!runtime->UseJit()); + CHECK(!runtime->UseJitCompilation()); const void* oat_quick_code = runtime->GetClassLinker()->GetOatMethodQuickCodeFor(this); CHECK(oat_quick_code == nullptr || oat_quick_code != GetEntryPointFromQuickCompiledCode()) << "Don't call compiled code when -Xint " << PrettyMethod(this); @@ -481,7 +481,7 @@ void ArtMethod::CopyFrom(ArtMethod* src, size_t image_pointer_size) { // to the JIT code, but this would require taking the JIT code cache lock to notify // it, which we do not want at this level. Runtime* runtime = Runtime::Current(); - if (runtime->GetJit() != nullptr) { + if (runtime->UseJitCompilation()) { if (runtime->GetJit()->GetCodeCache()->ContainsPc(GetEntryPointFromQuickCompiledCode())) { SetEntryPointFromQuickCompiledCodePtrSize(GetQuickToInterpreterBridge(), image_pointer_size); } diff --git a/runtime/art_method.h b/runtime/art_method.h index 08f02852ee..a012a5a9ca 100644 --- a/runtime/art_method.h +++ b/runtime/art_method.h @@ -41,6 +41,7 @@ class ShadowFrame; namespace mirror { class Array; class Class; +class IfTable; class PointerArray; } // namespace mirror @@ -50,66 +51,151 @@ class PointerArray; // with the last entry being null to make an assembly implementation of a lookup // faster. class ImtConflictTable { + enum MethodIndex { + kMethodInterface, + kMethodImplementation, + kMethodCount, // Number of elements in enum. + }; + public: // Build a new table copying `other` and adding the new entry formed of // the pair { `interface_method`, `implementation_method` } ImtConflictTable(ImtConflictTable* other, ArtMethod* interface_method, - ArtMethod* implementation_method) { - size_t index = 0; - while (other->entries_[index].interface_method != nullptr) { - entries_[index] = other->entries_[index]; - index++; + ArtMethod* implementation_method, + size_t pointer_size) { + const size_t count = other->NumEntries(pointer_size); + for (size_t i = 0; i < count; ++i) { + SetInterfaceMethod(i, pointer_size, other->GetInterfaceMethod(i, pointer_size)); + SetImplementationMethod(i, pointer_size, other->GetImplementationMethod(i, pointer_size)); } - entries_[index].interface_method = interface_method; - entries_[index].implementation_method = implementation_method; + SetInterfaceMethod(count, pointer_size, interface_method); + SetImplementationMethod(count, pointer_size, implementation_method); // Add the null marker. - entries_[index + 1].interface_method = nullptr; - entries_[index + 1].implementation_method = nullptr; + SetInterfaceMethod(count + 1, pointer_size, nullptr); + SetImplementationMethod(count + 1, pointer_size, nullptr); + } + + // num_entries excludes the header. + ImtConflictTable(size_t num_entries, size_t pointer_size) { + SetInterfaceMethod(num_entries, pointer_size, nullptr); + SetImplementationMethod(num_entries, pointer_size, nullptr); + } + + // Set an entry at an index. 
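+  // Layout note: logical entry i occupies the two adjacent pointer-sized
+  // slots data[i * kMethodCount + kMethodInterface] and
+  // data[i * kMethodCount + kMethodImplementation], so an entry takes
+  // 8 bytes with 4-byte pointers and 16 bytes with 8-byte ones (EntrySize()).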
+ void SetInterfaceMethod(size_t index, size_t pointer_size, ArtMethod* method) { + SetMethod(index * kMethodCount + kMethodInterface, pointer_size, method); + } + + void SetImplementationMethod(size_t index, size_t pointer_size, ArtMethod* method) { + SetMethod(index * kMethodCount + kMethodImplementation, pointer_size, method); + } + + ArtMethod* GetInterfaceMethod(size_t index, size_t pointer_size) const { + return GetMethod(index * kMethodCount + kMethodInterface, pointer_size); + } + + ArtMethod* GetImplementationMethod(size_t index, size_t pointer_size) const { + return GetMethod(index * kMethodCount + kMethodImplementation, pointer_size); + } + + // Visit all of the entries. + // NO_THREAD_SAFETY_ANALYSIS for calling with held locks. Visitor is passed a pair of ArtMethod* + // and also returns one. The order is <interface, implementation>. + template<typename Visitor> + void Visit(const Visitor& visitor, size_t pointer_size) NO_THREAD_SAFETY_ANALYSIS { + uint32_t table_index = 0; + for (;;) { + ArtMethod* interface_method = GetInterfaceMethod(table_index, pointer_size); + if (interface_method == nullptr) { + break; + } + ArtMethod* implementation_method = GetImplementationMethod(table_index, pointer_size); + auto input = std::make_pair(interface_method, implementation_method); + std::pair<ArtMethod*, ArtMethod*> updated = visitor(input); + if (input.first != updated.first) { + SetInterfaceMethod(table_index, pointer_size, updated.first); + } + if (input.second != updated.second) { + SetImplementationMethod(table_index, pointer_size, updated.second); + } + ++table_index; + } } // Lookup the implementation ArtMethod associated to `interface_method`. Return null // if not found. - ArtMethod* Lookup(ArtMethod* interface_method) const { + ArtMethod* Lookup(ArtMethod* interface_method, size_t pointer_size) const { uint32_t table_index = 0; - ArtMethod* current_interface_method; - while ((current_interface_method = entries_[table_index].interface_method) != nullptr) { + for (;;) { + ArtMethod* current_interface_method = GetInterfaceMethod(table_index, pointer_size); + if (current_interface_method == nullptr) { + break; + } if (current_interface_method == interface_method) { - return entries_[table_index].implementation_method; + return GetImplementationMethod(table_index, pointer_size); } - table_index++; + ++table_index; } return nullptr; } - // Compute the size in bytes taken by this table. - size_t ComputeSize() const { + // Compute the number of entries in this table. + size_t NumEntries(size_t pointer_size) const { uint32_t table_index = 0; - size_t total_size = 0; - while ((entries_[table_index].interface_method) != nullptr) { - total_size += sizeof(Entry); - table_index++; + while (GetInterfaceMethod(table_index, pointer_size) != nullptr) { + ++table_index; } + return table_index; + } + + // Compute the size in bytes taken by this table. + size_t ComputeSize(size_t pointer_size) const { // Add the end marker. - return total_size + sizeof(Entry); + return ComputeSize(NumEntries(pointer_size), pointer_size); } // Compute the size in bytes needed for copying the given `table` and add // one more entry. 
-  static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table) {
-    return table->ComputeSize() + sizeof(Entry);
+  static size_t ComputeSizeWithOneMoreEntry(ImtConflictTable* table, size_t pointer_size) {
+    return table->ComputeSize(pointer_size) + EntrySize(pointer_size);
   }

-  struct Entry {
-    ArtMethod* interface_method;
-    ArtMethod* implementation_method;
-  };
+  // Compute size with a fixed number of entries.
+  static size_t ComputeSize(size_t num_entries, size_t pointer_size) {
+    return (num_entries + 1) * EntrySize(pointer_size);  // Add one for null terminator.
+  }
+
+  static size_t EntrySize(size_t pointer_size) {
+    return pointer_size * static_cast<size_t>(kMethodCount);
+  }

  private:
+  ArtMethod* GetMethod(size_t index, size_t pointer_size) const {
+    if (pointer_size == 8) {
+      return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data64_[index]));
+    } else {
+      DCHECK_EQ(pointer_size, 4u);
+      return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(data32_[index]));
+    }
+  }
+
+  void SetMethod(size_t index, size_t pointer_size, ArtMethod* method) {
+    if (pointer_size == 8) {
+      data64_[index] = dchecked_integral_cast<uint64_t>(reinterpret_cast<uintptr_t>(method));
+    } else {
+      DCHECK_EQ(pointer_size, 4u);
+      data32_[index] = dchecked_integral_cast<uint32_t>(reinterpret_cast<uintptr_t>(method));
+    }
+  }
+
   // Array of entries that the assembly stubs will iterate over. Note that this is
   // not fixed size, and we allocate data prior to calling the constructor
   // of ImtConflictTable.
-  Entry entries_[0];
+  union {
+    uint32_t data32_[0];
+    uint64_t data64_[0];
+  };

   DISALLOW_COPY_AND_ASSIGN(ImtConflictTable);
 };

@@ -265,6 +351,12 @@ class ArtMethod FINAL {
     SetAccessFlags(GetAccessFlags() | kAccSkipAccessChecks);
   }

+  // Should this method be run in the interpreter and count locks (e.g., failed structured-
+  // locking verification)?
+  bool MustCountLocks() {
+    return (GetAccessFlags() & kAccMustCountLocks) != 0;
+  }
+
   // Returns true if this method could be overridden by a default method.
   bool IsOverridableByDefaultMethod() SHARED_REQUIRES(Locks::mutator_lock_);

@@ -351,7 +443,6 @@ class ArtMethod FINAL {
   // Find the method that this method overrides.
   ArtMethod* FindOverriddenMethod(size_t pointer_size)
-      REQUIRES(Roles::uninterruptible_)
       SHARED_REQUIRES(Locks::mutator_lock_);

   // Find the method index for this method within other_dexfile. If this method isn't present then
@@ -417,8 +508,8 @@ class ArtMethod FINAL {
     return reinterpret_cast<ImtConflictTable*>(GetEntryPointFromJniPtrSize(pointer_size));
   }

-  ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table) {
-    SetEntryPointFromJniPtrSize(table, sizeof(void*));
+  ALWAYS_INLINE void SetImtConflictTable(ImtConflictTable* table, size_t pointer_size) {
+    SetEntryPointFromJniPtrSize(table, pointer_size);
   }

   ALWAYS_INLINE void SetProfilingInfo(ProfilingInfo* info) {
diff --git a/runtime/base/logging.h b/runtime/base/logging.h
index 97280c3a03..3b5b8b54a5 100644
--- a/runtime/base/logging.h
+++ b/runtime/base/logging.h
@@ -56,6 +56,7 @@ struct LogVerbosity {
   bool threads;
   bool verifier;
   bool image;
+  bool systrace_lock_logging;  // Enabled with "-verbose:systrace-locks".
 };

 // Global log verbosity setting, initialized by InitLogging.
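The null-terminated pair layout that the new ImtConflictTable in art_method.h encodes can be modeled in isolation. The following is a minimal stand-alone sketch, not ART code: Method stands in for ArtMethod*, kMethodCount mirrors the enum above, and Lookup() walks interface slots until the null terminator, the same way the assembly stubs iterate the table.

#include <cstddef>
#include <vector>

using Method = const void*;  // Stand-in for ArtMethod*.

struct ToyConflictTable {
  static constexpr size_t kMethodCount = 2;  // Slots per entry: <interface, implementation>.

  // Flat slots: [iface0, impl0, iface1, impl1, ..., nullptr, nullptr].
  std::vector<Method> slots;

  // Mirrors ImtConflictTable::ComputeSize(): num_entries pairs plus a null-pair terminator.
  static size_t NumSlots(size_t num_entries) { return (num_entries + 1) * kMethodCount; }

  explicit ToyConflictTable(size_t num_entries) : slots(NumSlots(num_entries), nullptr) {}

  Method Lookup(Method interface_method) const {
    for (size_t i = 0; slots[i * kMethodCount] != nullptr; ++i) {
      if (slots[i * kMethodCount] == interface_method) {
        return slots[i * kMethodCount + 1];  // The paired implementation slot.
      }
    }
    return nullptr;  // Not found; the runtime falls back to a slow-path lookup.
  }
};

Keeping the terminator as a null pair rather than a stored length is what lets the assembly stub scan the table with nothing but a load-compare-advance loop.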
diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index fa0107af8e..d29d33a01f 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -687,6 +687,9 @@ bool ClassLinker::InitWithoutImage(std::vector<std::unique_ptr<const DexFile>> b self->AssertNoPendingException(); } + // Create conflict tables that depend on the class linker. + runtime->FixupConflictTables(); + FinishInit(self); VLOG(startup) << "ClassLinker::InitFromCompiler exiting"; @@ -773,9 +776,13 @@ static void SanityCheckArtMethod(ArtMethod* m, bool contains = false; for (gc::space::ImageSpace* space : spaces) { auto& header = space->GetImageHeader(); - auto& methods = header.GetMethodsSection(); - auto offset = reinterpret_cast<uint8_t*>(m) - space->Begin(); - contains |= methods.Contains(offset); + size_t offset = reinterpret_cast<uint8_t*>(m) - space->Begin(); + + const ImageSection& methods = header.GetMethodsSection(); + contains = contains || methods.Contains(offset); + + const ImageSection& runtime_methods = header.GetRuntimeMethodsSection(); + contains = contains || runtime_methods.Contains(offset); } CHECK(contains) << m << " not found"; } @@ -1438,20 +1445,14 @@ bool ClassLinker::UpdateAppImageClassLoadersAndDexCaches( if (*out_forward_dex_cache_array) { ScopedTrace timing("Fixup ArtMethod dex cache arrays"); FixupArtMethodArrayVisitor visitor(header); - header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods( - &visitor, - space->Begin(), - sizeof(void*)); + header.VisitPackedArtMethods(&visitor, space->Begin(), sizeof(void*)); Runtime::Current()->GetHeap()->WriteBarrierEveryFieldOf(class_loader.Get()); } if (kVerifyArtMethodDeclaringClasses) { ScopedTrace timing("Verify declaring classes"); ReaderMutexLock rmu(self, *Locks::heap_bitmap_lock_); VerifyDeclaringClassVisitor visitor; - header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods( - &visitor, - space->Begin(), - sizeof(void*)); + header.VisitPackedArtMethods(&visitor, space->Begin(), sizeof(void*)); } return true; } @@ -1729,9 +1730,8 @@ bool ClassLinker::AddImageSpace( // Set entry point to interpreter if in InterpretOnly mode. if (!runtime->IsAotCompiler() && runtime->GetInstrumentation()->InterpretOnly()) { - const ImageSection& methods = header.GetMethodsSection(); SetInterpreterEntrypointArtMethodVisitor visitor(image_pointer_size_); - methods.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_); + header.VisitPackedArtMethods(&visitor, space->Begin(), image_pointer_size_); } ClassTable* class_table = nullptr; @@ -1800,10 +1800,7 @@ bool ClassLinker::AddImageSpace( // This verification needs to happen after the classes have been added to the class loader. // Since it ensures classes are in the class table. VerifyClassInTableArtMethodVisitor visitor2(class_table); - header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods( - &visitor2, - space->Begin(), - sizeof(void*)); + header.VisitPackedArtMethods(&visitor2, space->Begin(), sizeof(void*)); } VLOG(class_linker) << "Adding image space took " << PrettyDuration(NanoTime() - start_time); return true; @@ -2035,6 +2032,7 @@ void ClassLinker::DeleteClassLoader(Thread* self, const ClassLoaderData& data) { Runtime* const runtime = Runtime::Current(); JavaVMExt* const vm = runtime->GetJavaVM(); vm->DeleteWeakGlobalRef(self, data.weak_root); + // Notify the JIT that we need to remove the methods and/or profiling info. 
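+  // (The code cache may still reference compiled code and ProfilingInfo for methods whose
+  // storage lives in this class loader's LinearAlloc, which is freed below.)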
if (runtime->GetJit() != nullptr) { jit::JitCodeCache* code_cache = runtime->GetJit()->GetCodeCache(); if (code_cache != nullptr) { @@ -2752,7 +2750,7 @@ bool ClassLinker::ShouldUseInterpreterEntrypoint(ArtMethod* method, const void* } if (runtime->IsNativeDebuggable()) { - DCHECK(runtime->UseJit() && runtime->GetJit()->JitAtFirstUse()); + DCHECK(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse()); // If we are doing native debugging, ignore application's AOT code, // since we want to JIT it with extra stackmaps for native debugging. // On the other hand, keep all AOT code from the boot image, since the @@ -5961,16 +5959,49 @@ ClassLinker::DefaultMethodSearchResult ClassLinker::FindDefaultMethodImplementat } } -// Sets imt_ref appropriately for LinkInterfaceMethods. -// If there is no method in the imt location of imt_ref it will store the given method there. -// Otherwise it will set the conflict method which will figure out which method to use during -// runtime. -static void SetIMTRef(ArtMethod* unimplemented_method, - ArtMethod* imt_conflict_method, - size_t image_pointer_size, - ArtMethod* current_method, - /*out*/ArtMethod** imt_ref) - SHARED_REQUIRES(Locks::mutator_lock_) { +ArtMethod* ClassLinker::AddMethodToConflictTable(mirror::Class* klass, + ArtMethod* conflict_method, + ArtMethod* interface_method, + ArtMethod* method, + bool force_new_conflict_method) { + ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*)); + Runtime* const runtime = Runtime::Current(); + LinearAlloc* linear_alloc = GetAllocatorForClassLoader(klass->GetClassLoader()); + bool new_entry = conflict_method == runtime->GetImtConflictMethod() || force_new_conflict_method; + + // Create a new entry if the existing one is the shared conflict method. + ArtMethod* new_conflict_method = new_entry + ? runtime->CreateImtConflictMethod(linear_alloc) + : conflict_method; + + // Allocate a new table. Note that we will leak this table at the next conflict, + // but that's a tradeoff compared to making the table fixed size. + void* data = linear_alloc->Alloc( + Thread::Current(), ImtConflictTable::ComputeSizeWithOneMoreEntry(current_table, + image_pointer_size_)); + if (data == nullptr) { + LOG(ERROR) << "Failed to allocate conflict table"; + return conflict_method; + } + ImtConflictTable* new_table = new (data) ImtConflictTable(current_table, + interface_method, + method, + image_pointer_size_); + + // Do a fence to ensure threads see the data in the table before it is assigned + // to the conflict method. + // Note that there is a race in the presence of multiple threads and we may leak + // memory from the LinearAlloc, but that's a tradeoff compared to using + // atomic operations. + QuasiAtomic::ThreadFenceRelease(); + new_conflict_method->SetImtConflictTable(new_table, image_pointer_size_); + return new_conflict_method; +} + +void ClassLinker::SetIMTRef(ArtMethod* unimplemented_method, + ArtMethod* imt_conflict_method, + ArtMethod* current_method, + /*out*/ArtMethod** imt_ref) { // Place method in imt if entry is empty, place conflict otherwise. if (*imt_ref == unimplemented_method) { *imt_ref = current_method; @@ -5980,9 +6011,9 @@ static void SetIMTRef(ArtMethod* unimplemented_method, // Note that we have checked IsRuntimeMethod, as there may be multiple different // conflict methods. 
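// An aside on the publication pattern used by AddMethodToConflictTable above: the new table
// is fully written first, and a release fence orders those writes before the store that
// makes the table visible to other threads. A minimal stand-alone sketch of the same idea
// with standard atomics (toy types, not ART code; QuasiAtomic::ThreadFenceRelease plays the
// role of the fence):
//
//   #include <atomic>
//
//   template <typename Table>
//   void PublishTable(Table* fully_initialized, std::atomic<Table*>* slot) {
//     std::atomic_thread_fence(std::memory_order_release);  // Order the table writes first.
//     slot->store(fully_initialized, std::memory_order_relaxed);  // Then publish the pointer.
//   }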
MethodNameAndSignatureComparator imt_comparator( - (*imt_ref)->GetInterfaceMethodIfProxy(image_pointer_size)); + (*imt_ref)->GetInterfaceMethodIfProxy(image_pointer_size_)); if (imt_comparator.HasSameNameAndSignature( - current_method->GetInterfaceMethodIfProxy(image_pointer_size))) { + current_method->GetInterfaceMethodIfProxy(image_pointer_size_))) { *imt_ref = current_method; } else { *imt_ref = imt_conflict_method; @@ -5995,6 +6026,151 @@ static void SetIMTRef(ArtMethod* unimplemented_method, } } +void ClassLinker::FillIMTAndConflictTables(mirror::Class* klass) { + DCHECK(klass->ShouldHaveEmbeddedImtAndVTable()) << PrettyClass(klass); + DCHECK(!klass->IsTemp()) << PrettyClass(klass); + ArtMethod* imt[mirror::Class::kImtSize]; + Runtime* const runtime = Runtime::Current(); + ArtMethod* const unimplemented_method = runtime->GetImtUnimplementedMethod(); + ArtMethod* const conflict_method = runtime->GetImtConflictMethod(); + std::fill_n(imt, arraysize(imt), unimplemented_method); + if (klass->GetIfTable() != nullptr) { + FillIMTFromIfTable(klass->GetIfTable(), + unimplemented_method, + conflict_method, + klass, + true, + false, + &imt[0]); + } + for (size_t i = 0; i < mirror::Class::kImtSize; ++i) { + klass->SetEmbeddedImTableEntry(i, imt[i], image_pointer_size_); + } +} + +static inline uint32_t GetIMTIndex(ArtMethod* interface_method) + SHARED_REQUIRES(Locks::mutator_lock_) { + return interface_method->GetDexMethodIndex() % mirror::Class::kImtSize; +} + +ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count, + LinearAlloc* linear_alloc, + size_t image_pointer_size) { + void* data = linear_alloc->Alloc(Thread::Current(), + ImtConflictTable::ComputeSize(count, + image_pointer_size)); + return (data != nullptr) ? new (data) ImtConflictTable(count, image_pointer_size) : nullptr; +} + +ImtConflictTable* ClassLinker::CreateImtConflictTable(size_t count, LinearAlloc* linear_alloc) { + return CreateImtConflictTable(count, linear_alloc, image_pointer_size_); +} + +void ClassLinker::FillIMTFromIfTable(mirror::IfTable* if_table, + ArtMethod* unimplemented_method, + ArtMethod* imt_conflict_method, + mirror::Class* klass, + bool create_conflict_tables, + bool ignore_copied_methods, + ArtMethod** imt) { + uint32_t conflict_counts[mirror::Class::kImtSize] = {}; + for (size_t i = 0, length = if_table->Count(); i < length; ++i) { + mirror::Class* interface = if_table->GetInterface(i); + const size_t num_virtuals = interface->NumVirtualMethods(); + const size_t method_array_count = if_table->GetMethodArrayCount(i); + // Virtual methods can be larger than the if table methods if there are default methods. + DCHECK_GE(num_virtuals, method_array_count); + if (kIsDebugBuild) { + if (klass->IsInterface()) { + DCHECK_EQ(method_array_count, 0u); + } else { + DCHECK_EQ(interface->NumDeclaredVirtualMethods(), method_array_count); + } + } + if (method_array_count == 0) { + continue; + } + auto* method_array = if_table->GetMethodArray(i); + for (size_t j = 0; j < method_array_count; ++j) { + ArtMethod* implementation_method = + method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_); + if (ignore_copied_methods && implementation_method->IsCopied()) { + continue; + } + DCHECK(implementation_method != nullptr); + // Miranda methods cannot be used to implement an interface method, but they are safe to put + // in the IMT since their entrypoint is the interface trampoline. 
If we put any copied methods
+      // or interface methods in the IMT here, they will not create extra conflicts, since we
+      // compare names and signatures in SetIMTRef.
+      ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
+      const uint32_t imt_index = GetIMTIndex(interface_method);
+
+      // There is only a conflict if the interface methods hashing to an IMT slot do not all
+      // have the same implementation method; keep track of this to avoid creating a conflict
+      // table in that case.
+
+      // Conflict table size for each IMT slot.
+      ++conflict_counts[imt_index];
+
+      SetIMTRef(unimplemented_method,
+                imt_conflict_method,
+                implementation_method,
+                /*out*/&imt[imt_index]);
+    }
+  }
+
+  if (create_conflict_tables) {
+    // Create the conflict tables.
+    LinearAlloc* linear_alloc = GetAllocatorForClassLoader(klass->GetClassLoader());
+    for (size_t i = 0; i < mirror::Class::kImtSize; ++i) {
+      size_t conflicts = conflict_counts[i];
+      if (imt[i] == imt_conflict_method) {
+        ImtConflictTable* new_table = CreateImtConflictTable(conflicts, linear_alloc);
+        if (new_table != nullptr) {
+          ArtMethod* new_conflict_method =
+              Runtime::Current()->CreateImtConflictMethod(linear_alloc);
+          new_conflict_method->SetImtConflictTable(new_table, image_pointer_size_);
+          imt[i] = new_conflict_method;
+        } else {
+          LOG(ERROR) << "Failed to allocate conflict table";
+          imt[i] = imt_conflict_method;
+        }
+      } else {
+        DCHECK_NE(imt[i], imt_conflict_method);
+      }
+    }
+
+    for (size_t i = 0, length = if_table->Count(); i < length; ++i) {
+      mirror::Class* interface = if_table->GetInterface(i);
+      const size_t method_array_count = if_table->GetMethodArrayCount(i);
+      // Virtual methods can be larger than the if table methods if there are default methods.
+      if (method_array_count == 0) {
+        continue;
+      }
+      auto* method_array = if_table->GetMethodArray(i);
+      for (size_t j = 0; j < method_array_count; ++j) {
+        ArtMethod* implementation_method =
+            method_array->GetElementPtrSize<ArtMethod*>(j, image_pointer_size_);
+        if (ignore_copied_methods && implementation_method->IsCopied()) {
+          continue;
+        }
+        DCHECK(implementation_method != nullptr);
+        ArtMethod* interface_method = interface->GetVirtualMethod(j, image_pointer_size_);
+        const uint32_t imt_index = GetIMTIndex(interface_method);
+        if (!imt[imt_index]->IsRuntimeMethod() ||
+            imt[imt_index] == unimplemented_method ||
+            imt[imt_index] == imt_conflict_method) {
+          continue;
+        }
+        ImtConflictTable* table = imt[imt_index]->GetImtConflictTable(image_pointer_size_);
+        const size_t num_entries = table->NumEntries(image_pointer_size_);
+        table->SetInterfaceMethod(num_entries, image_pointer_size_, interface_method);
+        table->SetImplementationMethod(num_entries, image_pointer_size_, implementation_method);
+      }
+    }
+  }
+}
+
 // Simple helper function that checks that no subtypes of 'val' are contained within the 'classes'
 // set.
static bool NotSubinterfaceOfAny(const std::unordered_set<mirror::Class*>& classes, @@ -6230,48 +6406,28 @@ static void SanityCheckVTable(Handle<mirror::Class> klass, uint32_t pointer_size } } -static void FillImtFromSuperClass(Handle<mirror::Class> klass, - Handle<mirror::IfTable> iftable, - ArtMethod* unimplemented_method, - ArtMethod* imt_conflict_method, - ArtMethod** out_imt, - size_t pointer_size) SHARED_REQUIRES(Locks::mutator_lock_) { +void ClassLinker::FillImtFromSuperClass(Handle<mirror::Class> klass, + ArtMethod* unimplemented_method, + ArtMethod* imt_conflict_method, + ArtMethod** imt) { DCHECK(klass->HasSuperClass()); mirror::Class* super_class = klass->GetSuperClass(); if (super_class->ShouldHaveEmbeddedImtAndVTable()) { for (size_t i = 0; i < mirror::Class::kImtSize; ++i) { - out_imt[i] = super_class->GetEmbeddedImTableEntry(i, pointer_size); + imt[i] = super_class->GetEmbeddedImTableEntry(i, image_pointer_size_); } } else { // No imt in the super class, need to reconstruct from the iftable. mirror::IfTable* if_table = super_class->GetIfTable(); - const size_t length = super_class->GetIfTableCount(); - for (size_t i = 0; i < length; ++i) { - mirror::Class* interface = iftable->GetInterface(i); - const size_t num_virtuals = interface->NumDeclaredVirtualMethods(); - const size_t method_array_count = if_table->GetMethodArrayCount(i); - DCHECK_EQ(num_virtuals, method_array_count); - if (method_array_count == 0) { - continue; - } - auto* method_array = if_table->GetMethodArray(i); - for (size_t j = 0; j < num_virtuals; ++j) { - auto method = method_array->GetElementPtrSize<ArtMethod*>(j, pointer_size); - DCHECK(method != nullptr) << PrettyClass(super_class); - // Miranda methods cannot be used to implement an interface method and defaults should be - // skipped in case we override it. - if (method->IsDefault() || method->IsMiranda()) { - continue; - } - ArtMethod* interface_method = interface->GetVirtualMethod(j, pointer_size); - uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize; - auto** imt_ref = &out_imt[imt_index]; - if (*imt_ref == unimplemented_method) { - *imt_ref = method; - } else if (*imt_ref != imt_conflict_method) { - *imt_ref = imt_conflict_method; - } - } + if (if_table != nullptr) { + // Ignore copied methods since we will handle these in LinkInterfaceMethods. + FillIMTFromIfTable(if_table, + unimplemented_method, + imt_conflict_method, + klass.Get(), + /*create_conflict_table*/false, + /*ignore_copied_methods*/true, + /*out*/imt); } } } @@ -6314,13 +6470,10 @@ bool ClassLinker::LinkInterfaceMethods( const bool extend_super_iftable = has_superclass; if (has_superclass && fill_tables) { FillImtFromSuperClass(klass, - iftable, unimplemented_method, imt_conflict_method, - out_imt, - image_pointer_size_); + out_imt); } - // Allocate method arrays before since we don't want miss visiting miranda method roots due to // thread suspension. if (fill_tables) { @@ -6404,7 +6557,7 @@ bool ClassLinker::LinkInterfaceMethods( auto* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j, image_pointer_size_); MethodNameAndSignatureComparator interface_name_comparator( interface_method->GetInterfaceMethodIfProxy(image_pointer_size_)); - uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize; + uint32_t imt_index = GetIMTIndex(interface_method); ArtMethod** imt_ptr = &out_imt[imt_index]; // For each method listed in the interface's method list, find the // matching method in our class's method list. 
We want to favor the
@@ -6449,7 +6602,6 @@ bool ClassLinker::LinkInterfaceMethods(
         // Place method in imt if entry is empty, place conflict otherwise.
         SetIMTRef(unimplemented_method,
                   imt_conflict_method,
-                  image_pointer_size_,
                   vtable_method,
                   /*out*/imt_ptr);
       }
@@ -6483,6 +6635,15 @@ bool ClassLinker::LinkInterfaceMethods(
             // The method is not overridable by a default method (i.e. it is directly implemented
             // in some class). Therefore move onto the next interface method.
             continue;
+          } else {
+            // If the superclass's method is overridable by a default method, we need to keep
+            // track of it, since even though it is overridable it is not guaranteed to actually
+            // be overridden. If it turns out not to be overridden and we did not keep track of
+            // it, we might add it to the vtable twice, causing corruption in this class and
+            // possibly any subclasses.
+            DCHECK(vtable_impl == nullptr || vtable_impl == supers_method)
+                << "vtable_impl was " << PrettyMethod(vtable_impl) << " and not 'nullptr' or "
+                << PrettyMethod(supers_method) << " as expected. IFTable appears to be corrupt!";
+            vtable_impl = supers_method;
+          }
         }
         // If we haven't found it yet we should search through the interfaces for default methods.
@@ -6581,7 +6742,6 @@ bool ClassLinker::LinkInterfaceMethods(
               method_array->SetElementPtrSize(j, current_method, image_pointer_size_);
               SetIMTRef(unimplemented_method,
                         imt_conflict_method,
-                        image_pointer_size_,
                         current_method,
                         /*out*/imt_ptr);
             }
@@ -7835,6 +7995,7 @@ std::set<DexCacheResolvedClasses> ClassLinker::GetResolvedClasses(bool ignore_bo
     VLOG(class_linker) << "Collecting class profile for dex file " << location
                        << " types=" << num_types << " class_defs=" << num_class_defs;
     DexCacheResolvedClasses resolved_classes(dex_file->GetLocation(),
+                                             dex_file->GetBaseLocation(),
                                              dex_file->GetLocationChecksum());
     size_t num_resolved = 0;
     std::unordered_set<uint16_t> class_set;
diff --git a/runtime/class_linker.h b/runtime/class_linker.h
index 97a10367fe..ece171c9a6 100644
--- a/runtime/class_linker.h
+++ b/runtime/class_linker.h
@@ -53,6 +53,7 @@ namespace mirror {
   class StackTraceElement;
 }  // namespace mirror

+class ImtConflictTable;
 template<class T> class Handle;
 template<class T> class MutableHandle;
 class InternTable;
@@ -610,6 +611,26 @@ class ClassLinker {
                                 const std::set<DexCacheResolvedClasses>& classes)
       REQUIRES(!dex_lock_);

+  ArtMethod* AddMethodToConflictTable(mirror::Class* klass,
+                                      ArtMethod* conflict_method,
+                                      ArtMethod* interface_method,
+                                      ArtMethod* method,
+                                      bool force_new_conflict_method)
+      SHARED_REQUIRES(Locks::mutator_lock_);
+
+  // Create a conflict table with a specified capacity.
+  ImtConflictTable* CreateImtConflictTable(size_t count, LinearAlloc* linear_alloc);
+
+  // Static version for when the class linker is not yet created.
+  static ImtConflictTable* CreateImtConflictTable(size_t count,
+                                                  LinearAlloc* linear_alloc,
+                                                  size_t pointer_size);
+
+  // Create the IMT and conflict tables for a class.
+  void FillIMTAndConflictTables(mirror::Class* klass) SHARED_REQUIRES(Locks::mutator_lock_);
+
   struct DexCacheData {
     // Weak root to the DexCache. Note: Do not decode this unnecessarily or else class unloading may
     // not work properly.
@@ -1057,6 +1078,28 @@ class ClassLinker {
       REQUIRES(!dex_lock_)
       SHARED_REQUIRES(Locks::mutator_lock_);

+  // Sets imt_ref appropriately for LinkInterfaceMethods.
+  // If there is no method in the imt location of imt_ref it will store the given method there.
+ // Otherwise it will set the conflict method which will figure out which method to use during + // runtime. + void SetIMTRef(ArtMethod* unimplemented_method, + ArtMethod* imt_conflict_method, + ArtMethod* current_method, + /*out*/ArtMethod** imt_ref) SHARED_REQUIRES(Locks::mutator_lock_); + + void FillIMTFromIfTable(mirror::IfTable* if_table, + ArtMethod* unimplemented_method, + ArtMethod* imt_conflict_method, + mirror::Class* klass, + bool create_conflict_tables, + bool ignore_copied_methods, + ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_); + + void FillImtFromSuperClass(Handle<mirror::Class> klass, + ArtMethod* unimplemented_method, + ArtMethod* imt_conflict_method, + ArtMethod** imt) SHARED_REQUIRES(Locks::mutator_lock_); + std::vector<const DexFile*> boot_class_path_; std::vector<std::unique_ptr<const DexFile>> boot_dex_files_; diff --git a/runtime/dex_cache_resolved_classes.h b/runtime/dex_cache_resolved_classes.h index 80c12cb642..0febbedf03 100644 --- a/runtime/dex_cache_resolved_classes.h +++ b/runtime/dex_cache_resolved_classes.h @@ -26,8 +26,11 @@ namespace art { // Data structure for passing around which classes belonging to a dex cache / dex file are resolved. class DexCacheResolvedClasses { public: - DexCacheResolvedClasses(const std::string& dex_location, uint32_t location_checksum) + DexCacheResolvedClasses(const std::string& dex_location, + const std::string& base_location, + uint32_t location_checksum) : dex_location_(dex_location), + base_location_(base_location), location_checksum_(location_checksum) {} // Only compare the key elements, ignore the resolved classes. @@ -35,6 +38,7 @@ class DexCacheResolvedClasses { if (location_checksum_ != other.location_checksum_) { return static_cast<int>(location_checksum_ - other.location_checksum_); } + // Don't need to compare base_location_ since dex_location_ has more info. return dex_location_.compare(other.dex_location_); } @@ -47,6 +51,10 @@ class DexCacheResolvedClasses { return dex_location_; } + const std::string& GetBaseLocation() const { + return base_location_; + } + uint32_t GetLocationChecksum() const { return location_checksum_; } @@ -57,6 +65,7 @@ class DexCacheResolvedClasses { private: const std::string dex_location_; + const std::string base_location_; const uint32_t location_checksum_; // Array of resolved class def indexes. mutable std::unordered_set<uint16_t> classes_; diff --git a/runtime/entrypoints/quick/quick_default_externs.h b/runtime/entrypoints/quick/quick_default_externs.h index 4e01d80312..f3a0d2f3ef 100644 --- a/runtime/entrypoints/quick/quick_default_externs.h +++ b/runtime/entrypoints/quick/quick_default_externs.h @@ -77,6 +77,10 @@ extern "C" void art_quick_handle_fill_data(void*, void*); extern "C" void art_quick_lock_object(art::mirror::Object*); extern "C" void art_quick_unlock_object(art::mirror::Object*); +// Lock entrypoints that do not inline any behavior (e.g., thin-locks). +extern "C" void art_quick_lock_object_no_inline(art::mirror::Object*); +extern "C" void art_quick_unlock_object_no_inline(art::mirror::Object*); + // Math entrypoints. 
extern "C" int64_t art_quick_d2l(double); extern "C" int64_t art_quick_f2l(float); diff --git a/runtime/entrypoints/quick/quick_default_init_entrypoints.h b/runtime/entrypoints/quick/quick_default_init_entrypoints.h new file mode 100644 index 0000000000..5dafa8b599 --- /dev/null +++ b/runtime/entrypoints/quick/quick_default_init_entrypoints.h @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_INIT_ENTRYPOINTS_H_ +#define ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_INIT_ENTRYPOINTS_H_ + +#include "base/logging.h" +#include "entrypoints/jni/jni_entrypoints.h" +#include "entrypoints/runtime_asm_entrypoints.h" +#include "quick_alloc_entrypoints.h" +#include "quick_default_externs.h" +#include "quick_entrypoints.h" + +namespace art { + +void DefaultInitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints) { + // JNI + jpoints->pDlsymLookup = art_jni_dlsym_lookup_stub; + + // Alloc + ResetQuickAllocEntryPoints(qpoints); + + // DexCache + qpoints->pInitializeStaticStorage = art_quick_initialize_static_storage; + qpoints->pInitializeTypeAndVerifyAccess = art_quick_initialize_type_and_verify_access; + qpoints->pInitializeType = art_quick_initialize_type; + qpoints->pResolveString = art_quick_resolve_string; + + // Field + qpoints->pSet8Instance = art_quick_set8_instance; + qpoints->pSet8Static = art_quick_set8_static; + qpoints->pSet16Instance = art_quick_set16_instance; + qpoints->pSet16Static = art_quick_set16_static; + qpoints->pSet32Instance = art_quick_set32_instance; + qpoints->pSet32Static = art_quick_set32_static; + qpoints->pSet64Instance = art_quick_set64_instance; + qpoints->pSet64Static = art_quick_set64_static; + qpoints->pSetObjInstance = art_quick_set_obj_instance; + qpoints->pSetObjStatic = art_quick_set_obj_static; + qpoints->pGetByteInstance = art_quick_get_byte_instance; + qpoints->pGetBooleanInstance = art_quick_get_boolean_instance; + qpoints->pGetShortInstance = art_quick_get_short_instance; + qpoints->pGetCharInstance = art_quick_get_char_instance; + qpoints->pGet32Instance = art_quick_get32_instance; + qpoints->pGet64Instance = art_quick_get64_instance; + qpoints->pGetObjInstance = art_quick_get_obj_instance; + qpoints->pGetByteStatic = art_quick_get_byte_static; + qpoints->pGetBooleanStatic = art_quick_get_boolean_static; + qpoints->pGetShortStatic = art_quick_get_short_static; + qpoints->pGetCharStatic = art_quick_get_char_static; + qpoints->pGet32Static = art_quick_get32_static; + qpoints->pGet64Static = art_quick_get64_static; + qpoints->pGetObjStatic = art_quick_get_obj_static; + + // Array + qpoints->pAputObjectWithNullAndBoundCheck = art_quick_aput_obj_with_null_and_bound_check; + qpoints->pAputObjectWithBoundCheck = art_quick_aput_obj_with_bound_check; + qpoints->pAputObject = art_quick_aput_obj; + qpoints->pHandleFillArrayData = art_quick_handle_fill_data; + + // JNI + qpoints->pJniMethodStart = JniMethodStart; + 
qpoints->pJniMethodStartSynchronized = JniMethodStartSynchronized; + qpoints->pJniMethodEnd = JniMethodEnd; + qpoints->pJniMethodEndSynchronized = JniMethodEndSynchronized; + qpoints->pJniMethodEndWithReference = JniMethodEndWithReference; + qpoints->pJniMethodEndWithReferenceSynchronized = JniMethodEndWithReferenceSynchronized; + qpoints->pQuickGenericJniTrampoline = art_quick_generic_jni_trampoline; + + // Locks + if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) { + qpoints->pLockObject = art_quick_lock_object_no_inline; + qpoints->pUnlockObject = art_quick_unlock_object_no_inline; + } else { + qpoints->pLockObject = art_quick_lock_object; + qpoints->pUnlockObject = art_quick_unlock_object; + } + + // Invocation + qpoints->pQuickImtConflictTrampoline = art_quick_imt_conflict_trampoline; + qpoints->pQuickResolutionTrampoline = art_quick_resolution_trampoline; + qpoints->pQuickToInterpreterBridge = art_quick_to_interpreter_bridge; + qpoints->pInvokeDirectTrampolineWithAccessCheck = + art_quick_invoke_direct_trampoline_with_access_check; + qpoints->pInvokeInterfaceTrampolineWithAccessCheck = + art_quick_invoke_interface_trampoline_with_access_check; + qpoints->pInvokeStaticTrampolineWithAccessCheck = + art_quick_invoke_static_trampoline_with_access_check; + qpoints->pInvokeSuperTrampolineWithAccessCheck = + art_quick_invoke_super_trampoline_with_access_check; + qpoints->pInvokeVirtualTrampolineWithAccessCheck = + art_quick_invoke_virtual_trampoline_with_access_check; + + // Thread + qpoints->pTestSuspend = art_quick_test_suspend; + + // Throws + qpoints->pDeliverException = art_quick_deliver_exception; + qpoints->pThrowArrayBounds = art_quick_throw_array_bounds; + qpoints->pThrowDivZero = art_quick_throw_div_zero; + qpoints->pThrowNoSuchMethod = art_quick_throw_no_such_method; + qpoints->pThrowNullPointer = art_quick_throw_null_pointer_exception; + qpoints->pThrowStackOverflow = art_quick_throw_stack_overflow; + + // Deoptimize + qpoints->pDeoptimize = art_quick_deoptimize_from_compiled_code; +}; + +} // namespace art + +#endif // ART_RUNTIME_ENTRYPOINTS_QUICK_QUICK_DEFAULT_INIT_ENTRYPOINTS_H_ diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index da6af724ac..e9cdbb743d 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -2174,7 +2174,8 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT imt_index % mirror::Class::kImtSize, sizeof(void*)); if (LIKELY(conflict_method->IsRuntimeMethod())) { ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*)); - method = current_table->Lookup(interface_method); + DCHECK(current_table != nullptr); + method = current_table->Lookup(interface_method, sizeof(void*)); } else { // It seems we aren't really a conflict method! method = cls->FindVirtualMethodForInterface(interface_method, sizeof(void*)); @@ -2225,34 +2226,13 @@ extern "C" TwoWordReturn artInvokeInterfaceTrampoline(uint32_t deadbeef ATTRIBUT ArtMethod* conflict_method = cls->GetEmbeddedImTableEntry( imt_index % mirror::Class::kImtSize, sizeof(void*)); if (conflict_method->IsRuntimeMethod()) { - ImtConflictTable* current_table = conflict_method->GetImtConflictTable(sizeof(void*)); - Runtime* runtime = Runtime::Current(); - LinearAlloc* linear_alloc = (cls->GetClassLoader() == nullptr) - ? 
runtime->GetLinearAlloc() - : cls->GetClassLoader()->GetAllocator(); - bool is_new_entry = (conflict_method == runtime->GetImtConflictMethod()); - - // Create a new entry if the existing one is the shared conflict method. - ArtMethod* new_conflict_method = is_new_entry - ? runtime->CreateImtConflictMethod(linear_alloc) - : conflict_method; - - // Allocate a new table. Note that we will leak this table at the next conflict, - // but that's a tradeoff compared to making the table fixed size. - void* data = linear_alloc->Alloc( - self, ImtConflictTable::ComputeSizeWithOneMoreEntry(current_table)); - CHECK(data != nullptr) << "Out of memory"; - ImtConflictTable* new_table = new (data) ImtConflictTable( - current_table, interface_method, method); - - // Do a fence to ensure threads see the data in the table before it is assigned - // to the conlict method. - // Note that there is a race in the presence of multiple threads and we may leak - // memory from the LinearAlloc, but that's a tradeoff compared to using - // atomic operations. - QuasiAtomic::ThreadFenceRelease(); - new_conflict_method->SetImtConflictTable(new_table); - if (is_new_entry) { + ArtMethod* new_conflict_method = Runtime::Current()->GetClassLinker()->AddMethodToConflictTable( + cls.Get(), + conflict_method, + interface_method, + method, + /*force_new_conflict_method*/false); + if (new_conflict_method != conflict_method) { // Update the IMT if we create a new conflict method. No fence needed here, as the // data is consistent. cls->SetEmbeddedImTableEntry(imt_index % mirror::Class::kImtSize, diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc index c2f772f876..df5aa0a75c 100644 --- a/runtime/gc/heap.cc +++ b/runtime/gc/heap.cc @@ -2687,8 +2687,8 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, concurrent_start_bytes_ = std::numeric_limits<size_t>::max(); } - if ((gc_type == collector::kGcTypeFull) && runtime->UseJit()) { - // It's time to clear all inline caches, in case some classes can be unloaded. + // It's time to clear all inline caches, in case some classes can be unloaded. + if ((gc_type == collector::kGcTypeFull) && (runtime->GetJit() != nullptr)) { runtime->GetJit()->GetCodeCache()->ClearGcRootsInInlineCaches(self); } diff --git a/runtime/gc/space/image_space.cc b/runtime/gc/space/image_space.cc index d386c74354..78c570fa99 100644 --- a/runtime/gc/space/image_space.cc +++ b/runtime/gc/space/image_space.cc @@ -880,7 +880,7 @@ class FixupObjectVisitor : public FixupVisitor { class ForwardObjectAdapter { public: - ALWAYS_INLINE ForwardObjectAdapter(const FixupVisitor* visitor) : visitor_(visitor) {} + ALWAYS_INLINE explicit ForwardObjectAdapter(const FixupVisitor* visitor) : visitor_(visitor) {} template <typename T> ALWAYS_INLINE T* operator()(T* src) const { @@ -893,7 +893,7 @@ class ForwardObjectAdapter { class ForwardCodeAdapter { public: - ALWAYS_INLINE ForwardCodeAdapter(const FixupVisitor* visitor) + ALWAYS_INLINE explicit ForwardCodeAdapter(const FixupVisitor* visitor) : visitor_(visitor) {} template <typename T> @@ -914,10 +914,26 @@ class FixupArtMethodVisitor : public FixupVisitor, public ArtMethodVisitor { pointer_size_(pointer_size) {} virtual void Visit(ArtMethod* method) NO_THREAD_SAFETY_ANALYSIS { - if (fixup_heap_objects_) { - method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this), pointer_size_); + // TODO: Separate visitor for runtime vs normal methods. 
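+      // Runtime methods have no declaring class, dex cache or other heap references to fix
+      // up, so only the conflict table pointer and the entry point need forwarding here.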
+ if (UNLIKELY(method->IsRuntimeMethod())) { + ImtConflictTable* table = method->GetImtConflictTable(pointer_size_); + if (table != nullptr) { + ImtConflictTable* new_table = ForwardObject(table); + if (table != new_table) { + method->SetImtConflictTable(new_table, pointer_size_); + } + } + const void* old_code = method->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size_); + const void* new_code = ForwardCode(old_code); + if (old_code != new_code) { + method->SetEntryPointFromQuickCompiledCodePtrSize(new_code, pointer_size_); + } + } else { + if (fixup_heap_objects_) { + method->UpdateObjectsForImageRelocation(ForwardObjectAdapter(this), pointer_size_); + } + method->UpdateEntrypoints<kWithoutReadBarrier>(ForwardCodeAdapter(this), pointer_size_); } - method->UpdateEntrypoints<kWithoutReadBarrier>(ForwardCodeAdapter(this), pointer_size_); } private: @@ -1018,6 +1034,7 @@ static bool RelocateInPlace(ImageHeader& image_header, const ImageSection& objects_section = image_header.GetImageSection(ImageHeader::kSectionObjects); uintptr_t objects_begin = reinterpret_cast<uintptr_t>(target_base + objects_section.Offset()); uintptr_t objects_end = reinterpret_cast<uintptr_t>(target_base + objects_section.End()); + FixupObjectAdapter fixup_adapter(boot_image, boot_oat, app_image, app_oat); if (fixup_image) { // Two pass approach, fix up all classes first, then fix up non class-objects. // The visited bitmap is used to ensure that pointer arrays are not forwarded twice. @@ -1037,7 +1054,6 @@ static bool RelocateInPlace(ImageHeader& image_header, ScopedObjectAccess soa(Thread::Current()); timing.NewTiming("Fixup objects"); bitmap->VisitMarkedRange(objects_begin, objects_end, fixup_object_visitor); - FixupObjectAdapter fixup_adapter(boot_image, boot_oat, app_image, app_oat); // Fixup image roots. CHECK(app_image.InSource(reinterpret_cast<uintptr_t>( image_header.GetImageRoots<kWithoutReadBarrier>()))); @@ -1104,19 +1120,18 @@ static bool RelocateInPlace(ImageHeader& image_header, boot_oat, app_image, app_oat); - image_header.GetImageSection(ImageHeader::kSectionArtMethods).VisitPackedArtMethods( - &method_visitor, - target_base, - pointer_size); + image_header.VisitPackedArtMethods(&method_visitor, target_base, pointer_size); } if (fixup_image) { { // Only touches objects in the app image, no need for mutator lock. TimingLogger::ScopedTiming timing("Fixup fields", &logger); FixupArtFieldVisitor field_visitor(boot_image, boot_oat, app_image, app_oat); - image_header.GetImageSection(ImageHeader::kSectionArtFields).VisitPackedArtFields( - &field_visitor, - target_base); + image_header.VisitPackedArtFields(&field_visitor, target_base); + } + { + TimingLogger::ScopedTiming timing("Fixup conflict tables", &logger); + image_header.VisitPackedImtConflictTables(fixup_adapter, target_base, pointer_size); } // In the app image case, the image methods are actually in the boot image. image_header.RelocateImageMethods(boot_image.Delta()); diff --git a/runtime/hprof/hprof.cc b/runtime/hprof/hprof.cc index 3885c605bd..9895395169 100644 --- a/runtime/hprof/hprof.cc +++ b/runtime/hprof/hprof.cc @@ -505,6 +505,7 @@ class Hprof : public SingleRootVisitor { // Walk the roots and the heap. 
output_->StartNewRecord(HPROF_TAG_HEAP_DUMP_SEGMENT, kHprofTime); + simple_roots_.clear(); runtime->VisitRoots(this); runtime->VisitImageRoots(this); runtime->GetHeap()->VisitObjectsPaused(VisitObjectCallback, this); @@ -884,6 +885,14 @@ class Hprof : public SingleRootVisitor { gc::EqAllocRecordTypesPtr<gc::AllocRecordStackTraceElement>> frames_; std::unordered_map<const mirror::Object*, const gc::AllocRecordStackTrace*> allocation_records_; + // Set used to keep track of what simple root records we have already + // emitted, to avoid emitting duplicate entries. The simple root records are + // those that contain no other information than the root type and the object + // id. A pair of root type and object id is packed into a uint64_t, with + // the root type in the upper 32 bits and the object id in the lower 32 + // bits. + std::unordered_set<uint64_t> simple_roots_; + friend class GcRootVisitor; DISALLOW_COPY_AND_ASSIGN(Hprof); }; @@ -962,10 +971,14 @@ void Hprof::MarkRootObject(const mirror::Object* obj, jobject jni_obj, HprofHeap case HPROF_ROOT_MONITOR_USED: case HPROF_ROOT_INTERNED_STRING: case HPROF_ROOT_DEBUGGER: - case HPROF_ROOT_VM_INTERNAL: - __ AddU1(heap_tag); - __ AddObjectId(obj); + case HPROF_ROOT_VM_INTERNAL: { + uint64_t key = (static_cast<uint64_t>(heap_tag) << 32) | PointerToLowMemUInt32(obj); + if (simple_roots_.insert(key).second) { + __ AddU1(heap_tag); + __ AddObjectId(obj); + } break; + } // ID: object ID // ID: JNI global ref ID diff --git a/runtime/image-inl.h b/runtime/image-inl.h index e3307d87b6..ea75a622c7 100644 --- a/runtime/image-inl.h +++ b/runtime/image-inl.h @@ -19,6 +19,8 @@ #include "image.h" +#include "art_method.h" + namespace art { template <ReadBarrierOption kReadBarrierOption> @@ -42,6 +44,20 @@ inline mirror::ObjectArray<mirror::Object>* ImageHeader::GetImageRoots() const { return image_roots; } +template <typename Visitor> +inline void ImageHeader::VisitPackedImtConflictTables(const Visitor& visitor, + uint8_t* base, + size_t pointer_size) const { + const ImageSection& section = GetImageSection(kSectionIMTConflictTables); + for (size_t pos = 0; pos < section.Size(); ) { + auto* table = reinterpret_cast<ImtConflictTable*>(base + section.Offset() + pos); + table->Visit([&visitor](const std::pair<ArtMethod*, ArtMethod*>& methods) { + return std::make_pair(visitor(methods.first), visitor(methods.second)); + }, pointer_size); + pos += table->ComputeSize(pointer_size); + } +} + } // namespace art #endif // ART_RUNTIME_IMAGE_INL_H_ diff --git a/runtime/image.cc b/runtime/image.cc index 1f54e3e6ae..a9552c27d3 100644 --- a/runtime/image.cc +++ b/runtime/image.cc @@ -24,7 +24,7 @@ namespace art { const uint8_t ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' }; -const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '7', '\0' }; +const uint8_t ImageHeader::kImageVersion[] = { '0', '2', '9', '\0' }; ImageHeader::ImageHeader(uint32_t image_begin, uint32_t image_size, @@ -147,9 +147,10 @@ std::ostream& operator<<(std::ostream& os, const ImageSection& section) { return os << "size=" << section.Size() << " range=" << section.Offset() << "-" << section.End(); } -void ImageSection::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const { - for (size_t pos = 0; pos < Size(); ) { - auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + Offset() + pos); +void ImageHeader::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const { + const ImageSection& fields = GetFieldsSection(); + for (size_t pos = 0; pos < 
fields.Size(); ) { + auto* array = reinterpret_cast<LengthPrefixedArray<ArtField>*>(base + fields.Offset() + pos); for (size_t i = 0; i < array->size(); ++i) { visitor->Visit(&array->At(i, sizeof(ArtField))); } @@ -157,18 +158,25 @@ void ImageSection::VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) } } -void ImageSection::VisitPackedArtMethods(ArtMethodVisitor* visitor, - uint8_t* base, - size_t pointer_size) const { +void ImageHeader::VisitPackedArtMethods(ArtMethodVisitor* visitor, + uint8_t* base, + size_t pointer_size) const { const size_t method_alignment = ArtMethod::Alignment(pointer_size); const size_t method_size = ArtMethod::Size(pointer_size); - for (size_t pos = 0; pos < Size(); ) { - auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + Offset() + pos); + const ImageSection& methods = GetMethodsSection(); + for (size_t pos = 0; pos < methods.Size(); ) { + auto* array = reinterpret_cast<LengthPrefixedArray<ArtMethod>*>(base + methods.Offset() + pos); for (size_t i = 0; i < array->size(); ++i) { visitor->Visit(&array->At(i, method_size, method_alignment)); } pos += array->ComputeSize(array->size(), method_size, method_alignment); } + const ImageSection& runtime_methods = GetRuntimeMethodsSection(); + for (size_t pos = 0; pos < runtime_methods.Size(); ) { + auto* method = reinterpret_cast<ArtMethod*>(base + runtime_methods.Offset() + pos); + visitor->Visit(method); + pos += method_size; + } } } // namespace art diff --git a/runtime/image.h b/runtime/image.h index 8e5dbad57d..2ea9af7728 100644 --- a/runtime/image.h +++ b/runtime/image.h @@ -64,12 +64,6 @@ class PACKED(4) ImageSection { return offset - offset_ < size_; } - // Visit ArtMethods in the section starting at base. - void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const; - - // Visit ArtMethods in the section starting at base. - void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const; - private: uint32_t offset_; uint32_t size_; @@ -200,6 +194,8 @@ class PACKED(4) ImageHeader { kSectionObjects, kSectionArtFields, kSectionArtMethods, + kSectionRuntimeMethods, + kSectionIMTConflictTables, kSectionDexCacheArrays, kSectionInternedStrings, kSectionClassTable, @@ -211,10 +207,19 @@ class PACKED(4) ImageHeader { void SetImageMethod(ImageMethod index, ArtMethod* method); const ImageSection& GetImageSection(ImageSections index) const; + const ImageSection& GetMethodsSection() const { return GetImageSection(kSectionArtMethods); } + const ImageSection& GetRuntimeMethodsSection() const { + return GetImageSection(kSectionRuntimeMethods); + } + + const ImageSection& GetFieldsSection() const { + return GetImageSection(ImageHeader::kSectionArtFields); + } + template <ReadBarrierOption kReadBarrierOption = kWithReadBarrier> mirror::Object* GetImageRoot(ImageRoot image_root) const SHARED_REQUIRES(Locks::mutator_lock_); @@ -265,6 +270,19 @@ class PACKED(4) ImageHeader { return boot_image_size_ != 0u; } + // Visit ArtMethods in the section starting at base. Includes runtime methods. + // TODO: Delete base parameter if it is always equal to GetImageBegin. + void VisitPackedArtMethods(ArtMethodVisitor* visitor, uint8_t* base, size_t pointer_size) const; + + // Visit ArtMethods in the section starting at base. + // TODO: Delete base parameter if it is always equal to GetImageBegin. 
+ void VisitPackedArtFields(ArtFieldVisitor* visitor, uint8_t* base) const; + + template <typename Visitor> + void VisitPackedImtConflictTables(const Visitor& visitor, + uint8_t* base, + size_t pointer_size) const; + private: static const uint8_t kImageMagic[4]; static const uint8_t kImageVersion[4]; diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 97dbe5d219..81a396a925 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -303,8 +303,13 @@ static inline JValue Execute(Thread* self, const DexFile::CodeItem* code_item, shadow_frame.GetMethod()->GetDeclaringClass()->AssertInitializedOrInitializingInThread(self); + // Lock counting is a special version of accessibility checks, and for simplicity and + // reduction of template parameters, we gate it behind access-checks mode. + ArtMethod* method = shadow_frame.GetMethod(); + DCHECK(!method->SkipAccessChecks() || !method->MustCountLocks()); + bool transaction_active = Runtime::Current()->IsActiveTransaction(); - if (LIKELY(shadow_frame.GetMethod()->SkipAccessChecks())) { + if (LIKELY(method->SkipAccessChecks())) { // Enter the "without access check" interpreter. if (kInterpreterImplKind == kMterpImplKind) { if (transaction_active) { @@ -487,6 +492,10 @@ void EnterInterpreterFromDeoptimize(Thread* self, // Are we executing the first shadow frame? bool first = true; while (shadow_frame != nullptr) { + // We do not want to recover lock state for lock counting when deoptimizing. Currently, + // the compiler should not have compiled a method that failed structured-locking checks. + DCHECK(!shadow_frame->GetMethod()->MustCountLocks()); + self->SetTopOfShadowStack(shadow_frame); const DexFile::CodeItem* code_item = shadow_frame->GetMethod()->GetCodeItem(); const uint32_t dex_pc = shadow_frame->GetDexPC(); diff --git a/runtime/interpreter/interpreter_common.h b/runtime/interpreter/interpreter_common.h index e5b89e2f98..69376fd7a1 100644 --- a/runtime/interpreter/interpreter_common.h +++ b/runtime/interpreter/interpreter_common.h @@ -95,7 +95,9 @@ static inline void DoMonitorEnter(Thread* self, StackHandleScope<1> hs(self); Handle<Object> h_ref(hs.NewHandle(ref)); h_ref->MonitorEnter(self); - frame->GetLockCountData().AddMonitor<kMonitorCounting>(self, h_ref.Get()); + if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) { + frame->GetLockCountData().AddMonitor(self, h_ref.Get()); + } } template <bool kMonitorCounting> @@ -107,7 +109,19 @@ static inline void DoMonitorExit(Thread* self, StackHandleScope<1> hs(self); Handle<Object> h_ref(hs.NewHandle(ref)); h_ref->MonitorExit(self); - frame->GetLockCountData().RemoveMonitorOrThrow<kMonitorCounting>(self, h_ref.Get()); + if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) { + frame->GetLockCountData().RemoveMonitorOrThrow(self, h_ref.Get()); + } +} + +template <bool kMonitorCounting> +static inline bool DoMonitorCheckOnExit(Thread* self, ShadowFrame* frame) + NO_THREAD_SAFETY_ANALYSIS + REQUIRES(!Roles::uninterruptible_) { + if (kMonitorCounting && frame->GetMethod()->MustCountLocks()) { + return frame->GetLockCountData().CheckAllMonitorsReleasedOrThrow(self); + } + return true; } void AbortTransactionF(Thread* self, const char* fmt, ...) 
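The interpreter_common.h helpers above make lock bookkeeping opt-in twice over: the kMonitorCounting template flag compiles the support in, and the per-method kAccMustCountLocks bit (see MustCountLocks() in art_method.h) enables it at runtime, so the common interpreter path pays only a flag test. Below is a stand-alone model of the bookkeeping itself, with toy types rather than the runtime's ShadowFrame/LockCountData:

#include <algorithm>
#include <vector>

struct ToyLockCountData {
  std::vector<const void*> held;  // Objects locked in this frame but not yet unlocked.

  void AddMonitor(const void* obj) { held.push_back(obj); }

  bool RemoveMonitorOrFail(const void* obj) {
    auto it = std::find(held.begin(), held.end(), obj);
    if (it == held.end()) {
      return false;  // Structured-locking violation: unlock without a matching lock.
    }
    held.erase(it);
    return true;
  }

  // On any method exit, normal or abnormal, every monitor must have been released.
  bool CheckAllMonitorsReleased() const { return held.empty(); }
};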
diff --git a/runtime/interpreter/interpreter_goto_table_impl.cc b/runtime/interpreter/interpreter_goto_table_impl.cc index 13cfb9877d..f03036b6a8 100644 --- a/runtime/interpreter/interpreter_goto_table_impl.cc +++ b/runtime/interpreter/interpreter_goto_table_impl.cc @@ -104,8 +104,7 @@ namespace interpreter { } HANDLE_INSTRUCTION_END(); #define HANDLE_MONITOR_CHECKS() \ - if (!shadow_frame.GetLockCountData(). \ - CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self)) { \ + if (!DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame)) { \ HANDLE_PENDING_EXCEPTION(); \ } @@ -2584,7 +2583,7 @@ JValue ExecuteGotoImpl(Thread* self, const DexFile::CodeItem* code_item, ShadowF instrumentation); if (found_dex_pc == DexFile::kDexNoIndex) { // Structured locking is to be enforced for abnormal termination, too. - shadow_frame.GetLockCountData().CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self); + DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame); return JValue(); /* Handled in caller. */ } else { int32_t displacement = static_cast<int32_t>(found_dex_pc) - static_cast<int32_t>(dex_pc); diff --git a/runtime/interpreter/interpreter_switch_impl.cc b/runtime/interpreter/interpreter_switch_impl.cc index 4323d4f425..18330babe0 100644 --- a/runtime/interpreter/interpreter_switch_impl.cc +++ b/runtime/interpreter/interpreter_switch_impl.cc @@ -34,8 +34,7 @@ namespace interpreter { instrumentation); \ if (found_dex_pc == DexFile::kDexNoIndex) { \ /* Structured locking is to be enforced for abnormal termination, too. */ \ - shadow_frame.GetLockCountData(). \ - CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self); \ + DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame); \ if (interpret_one_instruction) { \ /* Signal mterp to return to caller */ \ shadow_frame.SetDexPC(DexFile::kDexNoIndex); \ @@ -57,8 +56,7 @@ namespace interpreter { } while (false) #define HANDLE_MONITOR_CHECKS() \ - if (!shadow_frame.GetLockCountData(). \ - CheckAllMonitorsReleasedOrThrow<do_assignability_check>(self)) { \ + if (!DoMonitorCheckOnExit<do_assignability_check>(self, &shadow_frame)) { \ HANDLE_PENDING_EXCEPTION(); \ } diff --git a/runtime/interpreter/mterp/out/mterp_x86.S b/runtime/interpreter/mterp/out/mterp_x86.S index e46f9cde94..f78e1bc416 100644 --- a/runtime/interpreter/mterp/out/mterp_x86.S +++ b/runtime/interpreter/mterp/out/mterp_x86.S @@ -12985,6 +12985,7 @@ MterpCommonTakenBranch: * not-taken path. All Dalvik not-taken conditional branch offsets are 2. */ .L_check_not_taken_osr: + EXPORT_PC movl rSELF, %eax movl %eax, OUT_ARG0(%esp) leal OFF_FP_SHADOWFRAME(rFP), %ecx diff --git a/runtime/interpreter/mterp/out/mterp_x86_64.S b/runtime/interpreter/mterp/out/mterp_x86_64.S index 62dce6e777..031cec8233 100644 --- a/runtime/interpreter/mterp/out/mterp_x86_64.S +++ b/runtime/interpreter/mterp/out/mterp_x86_64.S @@ -11961,6 +11961,7 @@ MterpCommonTakenBranch: * not-taken path. All Dalvik not-taken conditional branch offsets are 2. */ .L_check_not_taken_osr: + EXPORT_PC movq rSELF, OUT_ARG0 leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1 movl $2, OUT_32_ARG2 diff --git a/runtime/interpreter/mterp/x86/footer.S b/runtime/interpreter/mterp/x86/footer.S index fa03e78fe3..e8c8ca8d79 100644 --- a/runtime/interpreter/mterp/x86/footer.S +++ b/runtime/interpreter/mterp/x86/footer.S @@ -234,6 +234,7 @@ MterpCommonTakenBranch: * not-taken path. All Dalvik not-taken conditional branch offsets are 2. 
*/ .L_check_not_taken_osr: + EXPORT_PC movl rSELF, %eax movl %eax, OUT_ARG0(%esp) leal OFF_FP_SHADOWFRAME(rFP), %ecx diff --git a/runtime/interpreter/mterp/x86_64/footer.S b/runtime/interpreter/mterp/x86_64/footer.S index 54d0cb1ce4..f78f163576 100644 --- a/runtime/interpreter/mterp/x86_64/footer.S +++ b/runtime/interpreter/mterp/x86_64/footer.S @@ -213,6 +213,7 @@ MterpCommonTakenBranch: * not-taken path. All Dalvik not-taken conditional branch offsets are 2. */ .L_check_not_taken_osr: + EXPORT_PC movq rSELF, OUT_ARG0 leaq OFF_FP_SHADOWFRAME(rFP), OUT_ARG1 movl $$2, OUT_32_ARG2 diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc index a41fd45041..79c320309c 100644 --- a/runtime/java_vm_ext.cc +++ b/runtime/java_vm_ext.cc @@ -318,6 +318,7 @@ class JII { } JavaVMExt* raw_vm = reinterpret_cast<JavaVMExt*>(vm); delete raw_vm->GetRuntime(); + android::ResetNativeLoader(); return JNI_OK; } @@ -950,6 +951,11 @@ extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) { LOG(WARNING) << "CreateJavaVM failed"; return JNI_ERR; } + + // Initialize native loader. This step makes sure we have + // everything set up before we start using JNI. + android::InitializeNativeLoader(); + *p_env = Thread::Current()->GetJniEnv(); *p_vm = runtime->GetJavaVM(); return JNI_OK; } diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc index c36543f1f3..e9317a5435 100644 --- a/runtime/jit/jit.cc +++ b/runtime/jit/jit.cc @@ -51,7 +51,7 @@ bool Jit::generate_debug_info_ = false; JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& options) { auto* jit_options = new JitOptions; - jit_options->use_jit_ = options.GetOrDefault(RuntimeArgumentMap::UseJIT); + jit_options->use_jit_compilation_ = options.GetOrDefault(RuntimeArgumentMap::UseJitCompilation); jit_options->code_cache_initial_capacity_ = options.GetOrDefault(RuntimeArgumentMap::JITCodeCacheInitialCapacity); @@ -102,14 +102,26 @@ JitOptions* JitOptions::CreateFromRuntimeArguments(const RuntimeArgumentMap& opt static_cast<size_t>(1)); } + if (options.Exists(RuntimeArgumentMap::JITInvokeTransitionWeight)) { + jit_options->invoke_transition_weight_ = + *options.Get(RuntimeArgumentMap::JITInvokeTransitionWeight); + if (jit_options->invoke_transition_weight_ > jit_options->warmup_threshold_) { + LOG(FATAL) << "Invoke transition weight is above the warmup threshold."; + } else if (jit_options->invoke_transition_weight_ == 0) { + LOG(FATAL) << "Invoke transition weight cannot be 0."; + } + } else { + jit_options->invoke_transition_weight_ = std::max( + jit_options->warmup_threshold_ / Jit::kDefaultInvokeTransitionWeightRatio, + static_cast<size_t>(1)); + } + return jit_options; } bool Jit::ShouldUsePriorityThreadWeight() { - // TODO(calin): verify that IsSensitiveThread covers only the cases we are interested on. - // In particular if apps can set StrictMode policies for any of their threads, case in which - // we need to find another way to track sensitive threads.
- return Runtime::Current()->InJankPerceptibleProcessState() && Thread::IsSensitiveThread(); + return Runtime::Current()->InJankPerceptibleProcessState() + && Thread::Current()->IsJitSensitiveThread(); } void Jit::DumpInfo(std::ostream& os) { @@ -132,9 +144,11 @@ Jit::Jit() : dump_info_on_shutdown_(false), cumulative_timings_("JIT timings"), memory_use_("Memory used for compilation", 16), lock_("JIT memory use lock"), + use_jit_compilation_(true), save_profiling_info_(false) {} Jit* Jit::Create(JitOptions* options, std::string* error_msg) { + DCHECK(options->UseJitCompilation() || options->GetSaveProfilingInfo()); std::unique_ptr<Jit> jit(new Jit); jit->dump_info_on_shutdown_ = options->DumpJitInfoOnShutdown(); if (jit_compiler_handle_ == nullptr && !LoadCompiler(error_msg)) { @@ -148,6 +162,7 @@ Jit* Jit::Create(JitOptions* options, std::string* error_msg) { if (jit->GetCodeCache() == nullptr) { return nullptr; } + jit->use_jit_compilation_ = options->UseJitCompilation(); jit->save_profiling_info_ = options->GetSaveProfilingInfo(); VLOG(jit) << "JIT created with initial_capacity=" << PrettySize(options->GetCodeCacheInitialCapacity()) @@ -160,8 +175,7 @@ Jit* Jit::Create(JitOptions* options, std::string* error_msg) { jit->warm_method_threshold_ = options->GetWarmupThreshold(); jit->osr_method_threshold_ = options->GetOsrThreshold(); jit->priority_thread_weight_ = options->GetPriorityThreadWeight(); - jit->transition_weight_ = std::max( - jit->warm_method_threshold_ / kDefaultTransitionRatio, static_cast<size_t>(1)); + jit->invoke_transition_weight_ = options->GetInvokeTransitionWeight(); jit->CreateThreadPool(); @@ -227,6 +241,7 @@ bool Jit::LoadCompiler(std::string* error_msg) { } bool Jit::CompileMethod(ArtMethod* method, Thread* self, bool osr) { + DCHECK(Runtime::Current()->UseJitCompilation()); DCHECK(!method->IsRuntimeMethod()); // Don't compile the method if it has breakpoints. @@ -331,8 +346,12 @@ Jit::~Jit() { } void Jit::NewTypeLoadedIfUsingJit(mirror::Class* type) { + if (!Runtime::Current()->UseJitCompilation()) { + // No need to notify if we only use the JIT to save profiles. + return; + } jit::Jit* jit = Runtime::Current()->GetJit(); - if (jit != nullptr && jit->generate_debug_info_) { + if (jit->generate_debug_info_) { DCHECK(jit->jit_types_loaded_ != nullptr); jit->jit_types_loaded_(jit->jit_compiler_handle_, &type, 1); } @@ -606,22 +625,24 @@ void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_ } // Avoid jumping more than one state at a time. new_count = std::min(new_count, hot_method_threshold_ - 1); - } else if (starting_count < hot_method_threshold_) { - if ((new_count >= hot_method_threshold_) && - !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) { - DCHECK(thread_pool_ != nullptr); - thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile)); - } - // Avoid jumping more than one state at a time. - new_count = std::min(new_count, osr_method_threshold_ - 1); - } else if (starting_count < osr_method_threshold_) { - if (!with_backedges) { - // If the samples don't contain any back edge, we don't increment the hotness. 
- return; - } - if ((new_count >= osr_method_threshold_) && !code_cache_->IsOsrCompiled(method)) { - DCHECK(thread_pool_ != nullptr); - thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr)); + } else if (use_jit_compilation_) { + if (starting_count < hot_method_threshold_) { + if ((new_count >= hot_method_threshold_) && + !code_cache_->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) { + DCHECK(thread_pool_ != nullptr); + thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompile)); + } + // Avoid jumping more than one state at a time. + new_count = std::min(new_count, osr_method_threshold_ - 1); + } else if (starting_count < osr_method_threshold_) { + if (!with_backedges) { + // If the samples don't contain any back edge, we don't increment the hotness. + return; + } + if ((new_count >= osr_method_threshold_) && !code_cache_->IsOsrCompiled(method)) { + DCHECK(thread_pool_ != nullptr); + thread_pool_->AddTask(self, new JitCompileTask(method, JitCompileTask::kCompileOsr)); + } } } // Update hotness counter @@ -629,7 +650,8 @@ void Jit::AddSamples(Thread* self, ArtMethod* method, uint16_t count, bool with_ } void Jit::MethodEntered(Thread* thread, ArtMethod* method) { - if (UNLIKELY(Runtime::Current()->GetJit()->JitAtFirstUse())) { + Runtime* runtime = Runtime::Current(); + if (UNLIKELY(runtime->UseJitCompilation() && runtime->GetJit()->JitAtFirstUse())) { // The compiler requires a ProfilingInfo object. ProfilingInfo::Create(thread, method, /* retry_allocation */ true); JitCompileTask compile_task(method, JitCompileTask::kCompile); diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h index 8198c18ebb..f3a6240e80 100644 --- a/runtime/jit/jit.h +++ b/runtime/jit/jit.h @@ -44,7 +44,7 @@ class Jit { static constexpr bool kStressMode = kIsDebugBuild; static constexpr size_t kDefaultCompileThreshold = kStressMode ? 2 : 10000; static constexpr size_t kDefaultPriorityThreadWeightRatio = 1000; - static constexpr size_t kDefaultTransitionRatio = 100; + static constexpr size_t kDefaultInvokeTransitionWeightRatio = 500; virtual ~Jit(); static Jit* Create(JitOptions* options, std::string* error_msg); @@ -87,6 +87,15 @@ class Jit { return priority_thread_weight_; } + // Returns false if we only need to save profile information and not compile methods. + bool UseJitCompilation() const { + return use_jit_compilation_; + } + + bool SaveProfilingInfo() const { + return save_profiling_info_; + } + // Wait until there is no more pending compilation tasks. void WaitForCompilationToFinish(Thread* self); @@ -106,12 +115,12 @@ class Jit { void NotifyInterpreterToCompiledCodeTransition(Thread* self, ArtMethod* caller) SHARED_REQUIRES(Locks::mutator_lock_) { - AddSamples(self, caller, transition_weight_, false); + AddSamples(self, caller, invoke_transition_weight_, false); } void NotifyCompiledCodeToInterpreterTransition(Thread* self, ArtMethod* callee) SHARED_REQUIRES(Locks::mutator_lock_) { - AddSamples(self, callee, transition_weight_, false); + AddSamples(self, callee, invoke_transition_weight_, false); } // Starts the profile saver if the config options allow profile recording. 
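Since the hunk above also renames kDefaultTransitionRatio (100) to kDefaultInvokeTransitionWeightRatio (500), it may help to see the default-weight arithmetic from jit.cc in isolation. This is a hedged sketch with hypothetical threshold values; the real numbers come from JitOptions and the new -Xjittransitionweight flag:

    #include <algorithm>
    #include <cstddef>
    #include <iostream>

    // Mirrors the default computation in JitOptions::CreateFromRuntimeArguments.
    constexpr size_t kDefaultInvokeTransitionWeightRatio = 500;

    size_t DefaultInvokeTransitionWeight(size_t warmup_threshold) {
      return std::max(warmup_threshold / kDefaultInvokeTransitionWeightRatio,
                      static_cast<size_t>(1));
    }

    int main() {
      // With a (hypothetical) warmup threshold of 5000, every
      // interpreter<->compiled-code transition counts as 10 samples, so about
      // 500 cross-boundary invokes of a method warm it up.
      std::cout << DefaultInvokeTransitionWeight(5000) << "\n";  // prints 10
      // Small thresholds are clamped so the weight never becomes 0.
      std::cout << DefaultInvokeTransitionWeight(100) << "\n";   // prints 1
      return 0;
    }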
@@ -179,13 +188,14 @@ class Jit { std::unique_ptr<jit::JitCodeCache> code_cache_; + bool use_jit_compilation_; bool save_profiling_info_; static bool generate_debug_info_; uint16_t hot_method_threshold_; uint16_t warm_method_threshold_; uint16_t osr_method_threshold_; uint16_t priority_thread_weight_; - uint16_t transition_weight_; + uint16_t invoke_transition_weight_; std::unique_ptr<ThreadPool> thread_pool_; DISALLOW_COPY_AND_ASSIGN(Jit); @@ -206,6 +216,9 @@ class JitOptions { uint16_t GetPriorityThreadWeight() const { return priority_thread_weight_; } + size_t GetInvokeTransitionWeight() const { + return invoke_transition_weight_; + } size_t GetCodeCacheInitialCapacity() const { return code_cache_initial_capacity_; } @@ -218,33 +231,34 @@ class JitOptions { bool GetSaveProfilingInfo() const { return save_profiling_info_; } - bool UseJIT() const { - return use_jit_; + bool UseJitCompilation() const { + return use_jit_compilation_; } - void SetUseJIT(bool b) { - use_jit_ = b; + void SetUseJitCompilation(bool b) { + use_jit_compilation_ = b; } void SetSaveProfilingInfo(bool b) { save_profiling_info_ = b; } void SetJitAtFirstUse() { - use_jit_ = true; + use_jit_compilation_ = true; compile_threshold_ = 0; } private: - bool use_jit_; + bool use_jit_compilation_; size_t code_cache_initial_capacity_; size_t code_cache_max_capacity_; size_t compile_threshold_; size_t warmup_threshold_; size_t osr_threshold_; uint16_t priority_thread_weight_; + size_t invoke_transition_weight_; bool dump_info_on_shutdown_; bool save_profiling_info_; JitOptions() - : use_jit_(false), + : use_jit_compilation_(false), code_cache_initial_capacity_(0), code_cache_max_capacity_(0), compile_threshold_(0), diff --git a/runtime/jit/offline_profiling_info.cc b/runtime/jit/offline_profiling_info.cc index a79bcf05ae..c99d3636a1 100644 --- a/runtime/jit/offline_profiling_info.cc +++ b/runtime/jit/offline_profiling_info.cc @@ -406,7 +406,8 @@ ProfileCompilationInfo::ProfileLoadSatus ProfileCompilationInfo::ReadProfileLine line_header->checksum = header_buffer.ReadUintAndAdvance<uint32_t>(); if (dex_location_size == 0 || dex_location_size > kMaxDexFileKeyLength) { - *error = "DexFileKey has an invalid size: " + std::to_string(dex_location_size); + *error = "DexFileKey has an invalid size: " + + std::to_string(static_cast<uint32_t>(dex_location_size)); return kProfileLoadBadData; } @@ -644,7 +645,8 @@ std::set<DexCacheResolvedClasses> ProfileCompilationInfo::GetResolvedClasses() c for (auto&& pair : info_) { const std::string& profile_key = pair.first; const DexFileData& data = pair.second; - DexCacheResolvedClasses classes(profile_key, data.checksum); + // TODO: Is it OK to use the same location for both base and dex location here? 
+ DexCacheResolvedClasses classes(profile_key, profile_key, data.checksum); classes.AddClasses(data.class_set.begin(), data.class_set.end()); ret.insert(classes); } diff --git a/runtime/jit/profile_saver.cc b/runtime/jit/profile_saver.cc index 7a9d2506dc..e8462a1188 100644 --- a/runtime/jit/profile_saver.cc +++ b/runtime/jit/profile_saver.cc @@ -145,20 +145,25 @@ ProfileCompilationInfo* ProfileSaver::GetCachedProfiledInfo(const std::string& f void ProfileSaver::FetchAndCacheResolvedClasses() { ScopedTrace trace(__PRETTY_FUNCTION__); ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); std::set<DexCacheResolvedClasses> resolved_classes = class_linker->GetResolvedClasses(/*ignore boot classes*/ true); MutexLock mu(Thread::Current(), *Locks::profiler_lock_); uint64_t total_number_of_profile_entries_cached = 0; + for (const auto& it : tracked_dex_base_locations_) { - std::set<DexCacheResolvedClasses> resolved_classes_for_location; + std::set<DexCacheResolvedClasses> resolved_classes_for_location; const std::string& filename = it.first; const std::set<std::string>& locations = it.second; for (const DexCacheResolvedClasses& classes : resolved_classes) { - if (locations.find(classes.GetDexLocation()) != locations.end()) { + if (locations.find(classes.GetBaseLocation()) != locations.end()) { + VLOG(profiler) << "Added classes for location " << classes.GetBaseLocation() + << " (" << classes.GetDexLocation() << ")"; resolved_classes_for_location.insert(classes); + } else { + VLOG(profiler) << "Location not found " << classes.GetBaseLocation() + << " (" << classes.GetDexLocation() << ")"; } } ProfileCompilationInfo* info = GetCachedProfiledInfo(filename); @@ -247,12 +252,17 @@ bool ProfileSaver::ProcessProfilingInfo() { void* ProfileSaver::RunProfileSaverThread(void* arg) { Runtime* runtime = Runtime::Current(); - ProfileSaver* profile_saver = reinterpret_cast<ProfileSaver*>(arg); - CHECK(runtime->AttachCurrentThread("Profile Saver", /*as_daemon*/true, runtime->GetSystemThreadGroup(), /*create_peer*/true)); + bool attached = runtime->AttachCurrentThread("Profile Saver", /*as_daemon*/true, runtime->GetSystemThreadGroup(), /*create_peer*/true); + if (!attached) { + CHECK(runtime->IsShuttingDown(Thread::Current())); + return nullptr; + } + + ProfileSaver* profile_saver = reinterpret_cast<ProfileSaver*>(arg); profile_saver->Run(); runtime->DetachCurrentThread(); @@ -285,7 +295,7 @@ void ProfileSaver::Start(const std::string& output_filename, const std::vector<std::string>& code_paths, const std::string& foreign_dex_profile_path, const std::string& app_data_dir) { - DCHECK(Runtime::Current()->UseJit()); + DCHECK(Runtime::Current()->SaveProfileInfo()); DCHECK(!output_filename.empty()); DCHECK(jit_code_cache != nullptr); @@ -520,4 +530,32 @@ void ProfileSaver::DumpInfo(std::ostream& os) { << max_number_of_profile_entries_cached_ << '\n'; } + +void ProfileSaver::ForceProcessProfiles() { + ProfileSaver* saver = nullptr; + { + MutexLock mu(Thread::Current(), *Locks::profiler_lock_); + saver = instance_; + } + // TODO(calin): this is not actually thread safe as the instance_ may have been deleted, + // but we only use this in testing when we know this won't happen. + // Refactor the way we handle the instance so that we don't end up in this situation.
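+ // Illustrative interleaving of the race the TODO above describes (assuming, as the
+ // current code suggests, that ProfileSaver::Stop() deletes instance_ under profiler_lock_):
+ //   A: lock profiler_lock_, saver = instance_, unlock
+ //   B: ProfileSaver::Stop() -> joins the saver thread, deletes instance_
+ //   A: saver->ProcessProfilingInfo() -> use-after-free
+ // Tests that call ForceProcessProfiles() must therefore never race with Stop().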
+ if (saver != nullptr) { + saver->ProcessProfilingInfo(); + } +} + +bool ProfileSaver::HasSeenMethod(const std::string& profile, + const DexFile* dex_file, + uint16_t method_idx) { + MutexLock mu(Thread::Current(), *Locks::profiler_lock_); + if (instance_ != nullptr) { + ProfileCompilationInfo* info = instance_->GetCachedProfiledInfo(profile); + if (info != nullptr) { + return info->ContainsMethod(MethodReference(dex_file, method_idx)); + } + } + return false; +} + } // namespace art diff --git a/runtime/jit/profile_saver.h b/runtime/jit/profile_saver.h index 0a222bfdcd..4f3cdc28cb 100644 --- a/runtime/jit/profile_saver.h +++ b/runtime/jit/profile_saver.h @@ -49,6 +49,12 @@ class ProfileSaver { // If the profile saver is running, dumps statistics to the `os`. Otherwise it does nothing. static void DumpInstanceInfo(std::ostream& os); + // Just for testing purposes. + static void ForceProcessProfiles(); + static bool HasSeenMethod(const std::string& profile, + const DexFile* dex_file, + uint16_t method_idx); + private: ProfileSaver(const std::string& output_filename, jit::JitCodeCache* jit_code_cache, @@ -65,7 +71,10 @@ class ProfileSaver { void Run() REQUIRES(!Locks::profiler_lock_, !wait_lock_); // Processes the existing profiling info from the jit code cache and returns // true if it needed to be saved to disk. - bool ProcessProfilingInfo(); + bool ProcessProfilingInfo() + REQUIRES(!Locks::profiler_lock_) + REQUIRES(!Locks::mutator_lock_); + // Returns true if the saver is shutting down (ProfileSaver::Stop() has been called). bool ShuttingDown(Thread* self) REQUIRES(!Locks::profiler_lock_); diff --git a/runtime/modifiers.h b/runtime/modifiers.h index 6dd182a11b..fd7a125bc3 100644 --- a/runtime/modifiers.h +++ b/runtime/modifiers.h @@ -60,10 +60,13 @@ static constexpr uint32_t kAccDefault = 0x00400000; // method (ru // This is set by the class linker during LinkInterfaceMethods. Prior to that point we do not know // if any particular method needs to be a default conflict. Used to figure out at runtime if // invoking this method will throw an exception. -static constexpr uint32_t kAccDefaultConflict = 0x00800000; // method (runtime) +static constexpr uint32_t kAccDefaultConflict = 0x00800000; // method (runtime) // Set by the verifier for a method we do not want the compiler to compile. -static constexpr uint32_t kAccCompileDontBother = 0x01000000; // method (runtime) +static constexpr uint32_t kAccCompileDontBother = 0x01000000; // method (runtime) + +// Set by the verifier for a method that could not be verified to follow structured locking. +static constexpr uint32_t kAccMustCountLocks = 0x02000000; // method (runtime) // Special runtime-only flags. // Interface and all its super-interfaces with default methods have been recursively initialized. diff --git a/runtime/monitor.cc b/runtime/monitor.cc index 3680c78311..f4bc222d1a 100644 --- a/runtime/monitor.cc +++ b/runtime/monitor.cc @@ -215,6 +215,85 @@ void Monitor::SetObject(mirror::Object* object) { obj_ = GcRoot<mirror::Object>(object); } +// Note: Adapted from CurrentMethodVisitor in thread.cc. We must not resolve here.
+ +struct NthCallerWithDexPcVisitor FINAL : public StackVisitor { + explicit NthCallerWithDexPcVisitor(Thread* thread, size_t frame) + SHARED_REQUIRES(Locks::mutator_lock_) + : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFramesNoResolve), + method_(nullptr), + dex_pc_(0), + current_frame_number_(0), + wanted_frame_number_(frame) {} + bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) { + ArtMethod* m = GetMethod(); + if (m == nullptr || m->IsRuntimeMethod()) { + // Runtime method, upcall, or resolution issue. Skip. + return true; + } + + // Is this the requested frame? + if (current_frame_number_ == wanted_frame_number_) { + method_ = m; + dex_pc_ = GetDexPc(false /* abort_on_error*/); + return false; + } + + // Look for more. + current_frame_number_++; + return true; + } + + ArtMethod* method_; + uint32_t dex_pc_; + + private: + size_t current_frame_number_; + const size_t wanted_frame_number_; +}; + +// This function is inlined and just helps to not have the VLOG and ATRACE check at all the +// potential tracing points. +void Monitor::AtraceMonitorLock(Thread* self, mirror::Object* obj, bool is_wait) { + if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging) && ATRACE_ENABLED())) { + AtraceMonitorLockImpl(self, obj, is_wait); + } +} + +void Monitor::AtraceMonitorLockImpl(Thread* self, mirror::Object* obj, bool is_wait) { + // Wait() requires a deeper call stack to be useful. Otherwise you'll see "Waiting at + // Object.java". Assume that we'll wait a nontrivial amount, so it's OK to do a longer + // stack walk than if !is_wait. + NthCallerWithDexPcVisitor visitor(self, is_wait ? 1U : 0U); + visitor.WalkStack(false); + const char* prefix = is_wait ? "Waiting on " : "Locking "; + + const char* filename; + int32_t line_number; + TranslateLocation(visitor.method_, visitor.dex_pc_, &filename, &line_number); + + // It would be nice to have a stable "ID" for the object here. However, the only stable thing + // would be the identity hashcode. But we cannot use IdentityHashcode here: For one, there are + // times when it is unsafe to make that call (see stack dumping for an explanation). More + // importantly, we would have to give up on thin-locking when adding systrace locks, as the + // identity hashcode is stored in the lockword normally (so can't be used with thin-locks). + // + // Because of thin-locks we also cannot use the monitor id (as there is no monitor). Monitor ids + // also do not have to be stable, as the monitor may be deflated. + std::string tmp = StringPrintf("%s %d at %s:%d", + prefix, + (obj == nullptr ? -1 : static_cast<int32_t>(reinterpret_cast<uintptr_t>(obj))), + (filename != nullptr ? 
filename : "null"), + line_number); + ATRACE_BEGIN(tmp.c_str()); +} + +void Monitor::AtraceMonitorUnlock() { + if (UNLIKELY(VLOG_IS_ON(systrace_lock_logging))) { + ATRACE_END(); + } +} + std::string Monitor::PrettyContentionInfo(const std::string& owner_name, pid_t owner_tid, ArtMethod* owners_method, @@ -228,8 +307,8 @@ std::string Monitor::PrettyContentionInfo(const std::string& owner_name, std::ostringstream oss; oss << "monitor contention with owner " << owner_name << " (" << owner_tid << ")"; if (owners_method != nullptr) { - oss << " owner method=" << PrettyMethod(owners_method); - oss << " from " << owners_filename << ":" << owners_line_number; + oss << " at " << PrettyMethod(owners_method); + oss << "(" << owners_filename << ":" << owners_line_number << ")"; } oss << " waiters=" << num_waiters; return oss.str(); @@ -246,10 +325,10 @@ void Monitor::Lock(Thread* self) { if (lock_profiling_threshold_ != 0) { locking_method_ = self->GetCurrentMethod(&locking_dex_pc_); } - return; + break; } else if (owner_ == self) { // Recursive. lock_count_++; - return; + break; } // Contended. const bool log_contention = (lock_profiling_threshold_ != 0); @@ -284,8 +363,9 @@ void Monitor::Lock(Thread* self) { const char* filename; int32_t line_number; TranslateLocation(m, pc, &filename, &line_number); - oss << " blocking from " << (filename != nullptr ? filename : "null") - << ":" << line_number; + oss << " blocking from " + << PrettyMethod(m) << "(" << (filename != nullptr ? filename : "null") << ":" + << line_number << ")"; ATRACE_BEGIN(oss.str().c_str()); } monitor_contenders_.Wait(self); // Still contended so wait. @@ -318,6 +398,8 @@ void Monitor::Lock(Thread* self) { } if (sample_percent != 0 && (static_cast<uint32_t>(rand() % 100) < sample_percent)) { if (wait_ms > kLongWaitMs && owners_method != nullptr) { + uint32_t pc; + ArtMethod* m = self->GetCurrentMethod(&pc); // TODO: We should maybe check that original_owner is still a live thread. LOG(WARNING) << "Long " << PrettyContentionInfo(original_owner_name, @@ -325,7 +407,7 @@ void Monitor::Lock(Thread* self) { owners_method, owners_dex_pc, num_waiters) - << " for " << PrettyDuration(MsToNs(wait_ms)); + << " in " << PrettyMethod(m) << " for " << PrettyDuration(MsToNs(wait_ms)); } const char* owners_filename; int32_t owners_line_number; @@ -348,6 +430,8 @@ void Monitor::Lock(Thread* self) { monitor_lock_.Lock(self); // Reacquire locks in order. --num_waiters_; } + + AtraceMonitorLock(self, GetObject(), false /* is_wait */); } static void ThrowIllegalMonitorStateExceptionF(const char* fmt, ...) @@ -457,6 +541,7 @@ bool Monitor::Unlock(Thread* self) { } if (owner == self) { // We own the monitor, so nobody else can be in here. + AtraceMonitorUnlock(); if (lock_count_ == 0) { owner_ = nullptr; locking_method_ = nullptr; @@ -523,6 +608,11 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, uintptr_t saved_dex_pc = locking_dex_pc_; locking_dex_pc_ = 0; + AtraceMonitorUnlock(); // For the implict Unlock() just above. This will only end the deepest + // nesting, but that is enough for the visualization, and corresponds to + // the single Lock() we do afterwards. + AtraceMonitorLock(self, GetObject(), true /* is_wait */); + bool was_interrupted = false; { // Update thread state. If the GC wakes up, it'll ignore us, knowing @@ -586,6 +676,8 @@ void Monitor::Wait(Thread* self, int64_t ms, int32_t ns, self->ThrowNewException("Ljava/lang/InterruptedException;", nullptr); } + AtraceMonitorUnlock(); // End Wait(). 
+ // Re-acquire the monitor and lock. Lock(self); monitor_lock_.Lock(self); @@ -775,6 +867,7 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) { case LockWord::kUnlocked: { LockWord thin_locked(LockWord::FromThinLockId(thread_id, 0, lock_word.ReadBarrierState())); if (h_obj->CasLockWordWeakSequentiallyConsistent(lock_word, thin_locked)) { + AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */); // CasLockWord enforces more than the acquire ordering we need here. return h_obj.Get(); // Success! } @@ -790,10 +883,12 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) { lock_word.ReadBarrierState())); if (!kUseReadBarrier) { h_obj->SetLockWord(thin_locked, true); + AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */); return h_obj.Get(); // Success! } else { // Use CAS to preserve the read barrier state. if (h_obj->CasLockWordWeakSequentiallyConsistent(lock_word, thin_locked)) { + AtraceMonitorLock(self, h_obj.Get(), false /* is_wait */); return h_obj.Get(); // Success! } } @@ -830,7 +925,7 @@ mirror::Object* Monitor::MonitorEnter(Thread* self, mirror::Object* obj) { continue; // Start from the beginning. default: { LOG(FATAL) << "Invalid monitor state " << lock_word.GetState(); - return h_obj.Get(); + UNREACHABLE(); } } } @@ -869,11 +964,17 @@ bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) { if (!kUseReadBarrier) { DCHECK_EQ(new_lw.ReadBarrierState(), 0U); h_obj->SetLockWord(new_lw, true); + if (ATRACE_ENABLED()) { + ATRACE_END(); + } // Success! return true; } else { // Use CAS to preserve the read barrier state. if (h_obj->CasLockWordWeakSequentiallyConsistent(lock_word, new_lw)) { + if (ATRACE_ENABLED()) { + ATRACE_END(); + } // Success! return true; } diff --git a/runtime/monitor.h b/runtime/monitor.h index 8c7496b524..7b4b8f9467 100644 --- a/runtime/monitor.h +++ b/runtime/monitor.h @@ -250,6 +250,17 @@ class Monitor { uint32_t GetOwnerThreadId() REQUIRES(!monitor_lock_); + // Support for systrace output of monitor operations. + ALWAYS_INLINE static void AtraceMonitorLock(Thread* self, + mirror::Object* obj, + bool is_wait) + SHARED_REQUIRES(Locks::mutator_lock_); + static void AtraceMonitorLockImpl(Thread* self, + mirror::Object* obj, + bool is_wait) + SHARED_REQUIRES(Locks::mutator_lock_); + ALWAYS_INLINE static void AtraceMonitorUnlock(); + static uint32_t lock_profiling_threshold_; Mutex monitor_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; diff --git a/runtime/monitor_pool.cc b/runtime/monitor_pool.cc index ce38e4f108..a47a4b2cf2 100644 --- a/runtime/monitor_pool.cc +++ b/runtime/monitor_pool.cc @@ -28,7 +28,11 @@ namespace mirror { } // namespace mirror MonitorPool::MonitorPool() - : num_chunks_(0), capacity_(0), first_free_(nullptr) { + : current_chunk_list_index_(0), num_chunks_(0), current_chunk_list_capacity_(0), + first_free_(nullptr) { + for (size_t i = 0; i < kMaxChunkLists; ++i) { + monitor_chunks_[i] = nullptr; // Not absolutely required, but ... + } AllocateChunk(); // Get our first chunk. } @@ -37,24 +41,19 @@ MonitorPool::MonitorPool() void MonitorPool::AllocateChunk() { DCHECK(first_free_ == nullptr); - // Do we need to resize? - if (num_chunks_ == capacity_) { - if (capacity_ == 0U) { - // Initialization. 
- capacity_ = kInitialChunkStorage; - uintptr_t* new_backing = new uintptr_t[capacity_](); - DCHECK(monitor_chunks_.LoadRelaxed() == nullptr); - monitor_chunks_.StoreRelaxed(new_backing); - } else { - size_t new_capacity = 2 * capacity_; - uintptr_t* new_backing = new uintptr_t[new_capacity](); - uintptr_t* old_backing = monitor_chunks_.LoadRelaxed(); - memcpy(new_backing, old_backing, sizeof(uintptr_t) * capacity_); - monitor_chunks_.StoreRelaxed(new_backing); - capacity_ = new_capacity; - old_chunk_arrays_.push_back(std::unique_ptr<uintptr_t[]>(old_backing)); - VLOG(monitor) << "Resizing to capacity " << capacity_; - } + // Do we need to allocate another chunk list? + if (num_chunks_ == current_chunk_list_capacity_) { + if (current_chunk_list_capacity_ != 0U) { + ++current_chunk_list_index_; + CHECK_LT(current_chunk_list_index_, kMaxChunkLists) << "Out of space for inflated monitors"; + VLOG(monitor) << "Expanding to capacity " + << 2 * ChunkListCapacity(current_chunk_list_index_) - kInitialChunkStorage; + } // else we're initializing + current_chunk_list_capacity_ = ChunkListCapacity(current_chunk_list_index_); + uintptr_t* new_list = new uintptr_t[current_chunk_list_capacity_](); + DCHECK(monitor_chunks_[current_chunk_list_index_] == nullptr); + monitor_chunks_[current_chunk_list_index_] = new_list; + num_chunks_ = 0; } // Allocate the chunk. @@ -65,7 +64,7 @@ void MonitorPool::AllocateChunk() { CHECK_EQ(0U, reinterpret_cast<uintptr_t>(chunk) % kMonitorAlignment); // Add the chunk. - *(monitor_chunks_.LoadRelaxed() + num_chunks_) = reinterpret_cast<uintptr_t>(chunk); + monitor_chunks_[current_chunk_list_index_][num_chunks_] = reinterpret_cast<uintptr_t>(chunk); num_chunks_++; // Set up the free list @@ -73,8 +72,8 @@ void MonitorPool::AllocateChunk() { (kChunkCapacity - 1) * kAlignedMonitorSize); last->next_free_ = nullptr; // Eagerly compute id. - last->monitor_id_ = OffsetToMonitorId((num_chunks_ - 1) * kChunkSize + - (kChunkCapacity - 1) * kAlignedMonitorSize); + last->monitor_id_ = OffsetToMonitorId(current_chunk_list_index_* (kMaxListSize * kChunkSize) + + (num_chunks_ - 1) * kChunkSize + (kChunkCapacity - 1) * kAlignedMonitorSize); for (size_t i = 0; i < kChunkCapacity - 1; ++i) { Monitor* before = reinterpret_cast<Monitor*>(reinterpret_cast<uintptr_t>(last) - kAlignedMonitorSize); @@ -91,21 +90,19 @@ void MonitorPool::AllocateChunk() { void MonitorPool::FreeInternal() { // This is on shutdown with NO_THREAD_SAFETY_ANALYSIS, can't/don't need to lock. 
- uintptr_t* backing = monitor_chunks_.LoadRelaxed(); - DCHECK(backing != nullptr); - DCHECK_GT(capacity_, 0U); - DCHECK_GT(num_chunks_, 0U); - - for (size_t i = 0; i < capacity_; ++i) { - if (i < num_chunks_) { - DCHECK_NE(backing[i], 0U); - allocator_.deallocate(reinterpret_cast<uint8_t*>(backing[i]), kChunkSize); - } else { - DCHECK_EQ(backing[i], 0U); + DCHECK_NE(current_chunk_list_capacity_, 0UL); + for (size_t i = 0; i <= current_chunk_list_index_; ++i) { + DCHECK_NE(monitor_chunks_[i], static_cast<uintptr_t*>(nullptr)); + for (size_t j = 0; j < ChunkListCapacity(i); ++j) { + if (i < current_chunk_list_index_ || j < num_chunks_) { + DCHECK_NE(monitor_chunks_[i][j], 0U); + allocator_.deallocate(reinterpret_cast<uint8_t*>(monitor_chunks_[i][j]), kChunkSize); + } else { + DCHECK_EQ(monitor_chunks_[i][j], 0U); + } } + delete[] monitor_chunks_[i]; } - - delete[] backing; } Monitor* MonitorPool::CreateMonitorInPool(Thread* self, Thread* owner, mirror::Object* obj, diff --git a/runtime/monitor_pool.h b/runtime/monitor_pool.h index 875b3fe73d..99810e0c82 100644 --- a/runtime/monitor_pool.h +++ b/runtime/monitor_pool.h @@ -128,12 +128,17 @@ class MonitorPool { void ReleaseMonitorToPool(Thread* self, Monitor* monitor); void ReleaseMonitorsToPool(Thread* self, MonitorList::Monitors* monitors); - // Note: This is safe as we do not ever move chunks. + // Note: This is safe as we do not ever move chunks. All needed entries in the monitor_chunks_ + // data structure are read-only once we get here. Updates happen-before this call because + // the lock word was stored with release semantics and we read it with acquire semantics to + // retrieve the id. Monitor* LookupMonitor(MonitorId mon_id) { size_t offset = MonitorIdToOffset(mon_id); size_t index = offset / kChunkSize; + size_t top_index = index / kMaxListSize; + size_t list_index = index % kMaxListSize; size_t offset_in_chunk = offset % kChunkSize; - uintptr_t base = *(monitor_chunks_.LoadRelaxed()+index); + uintptr_t base = monitor_chunks_[top_index][list_index]; return reinterpret_cast<Monitor*>(base + offset_in_chunk); } @@ -142,28 +147,37 @@ class MonitorPool { return base_addr <= mon_ptr && (mon_ptr - base_addr < kChunkSize); } - // Note: This is safe as we do not ever move chunks. 
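To make the LookupMonitor index arithmetic above concrete, here is a standalone decoding sketch. The constants are hypothetical release-build values mirroring the monitor_pool.h definitions below; only the math is taken from the change:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Hypothetical release-build values mirroring monitor_pool.h.
    constexpr size_t kChunkSize = 4096;                         // kPageSize
    constexpr size_t kInitialChunkStorage = 256;
    constexpr size_t kMaxListSize = kInitialChunkStorage << 7;  // largest of the 8 lists

    struct DecodedId {
      size_t list;             // which chunk list (top bits of the id)
      size_t chunk;            // chunk index within that list
      size_t offset_in_chunk;  // byte offset of the monitor inside the chunk
    };

    // Mirrors MonitorIdToOffset() and the decomposition in LookupMonitor().
    DecodedId Decode(uint32_t monitor_id) {
      size_t offset = static_cast<size_t>(monitor_id) << 3;  // ids store offset >> 3
      size_t index = offset / kChunkSize;
      return DecodedId{index / kMaxListSize, index % kMaxListSize, offset % kChunkSize};
    }

    int main() {
      // A monitor in list 1, chunk 3, at byte 160 must round-trip through the id math.
      size_t offset = (1 * kMaxListSize + 3) * kChunkSize + 160;  // 160 is 8-byte aligned
      uint32_t id = static_cast<uint32_t>(offset >> 3);
      DecodedId d = Decode(id);
      assert(d.list == 1 && d.chunk == 3 && d.offset_in_chunk == 160);
      return 0;
    }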
MonitorId ComputeMonitorIdInPool(Monitor* mon, Thread* self) { MutexLock mu(self, *Locks::allocated_monitor_ids_lock_); - for (size_t index = 0; index < num_chunks_; ++index) { - uintptr_t chunk_addr = *(monitor_chunks_.LoadRelaxed() + index); - if (IsInChunk(chunk_addr, mon)) { - return OffsetToMonitorId( - reinterpret_cast<uintptr_t>(mon) - chunk_addr + index * kChunkSize); + for (size_t i = 0; i <= current_chunk_list_index_; ++i) { + for (size_t j = 0; j < ChunkListCapacity(i); ++j) { + if (j >= num_chunks_ && i == current_chunk_list_index_) { + break; + } + uintptr_t chunk_addr = monitor_chunks_[i][j]; + if (IsInChunk(chunk_addr, mon)) { + return OffsetToMonitorId( + reinterpret_cast<uintptr_t>(mon) - chunk_addr + + i * (kMaxListSize * kChunkSize) + j * kChunkSize); + } } } LOG(FATAL) << "Did not find chunk that contains monitor."; return 0; } - static size_t MonitorIdToOffset(MonitorId id) { + static constexpr size_t MonitorIdToOffset(MonitorId id) { return id << 3; } - static MonitorId OffsetToMonitorId(size_t offset) { + static constexpr MonitorId OffsetToMonitorId(size_t offset) { return static_cast<MonitorId>(offset >> 3); } + static constexpr size_t ChunkListCapacity(size_t index) { + return kInitialChunkStorage << index; + } + // TODO: There are assumptions in the code that monitor addresses are 8B aligned (>>3). static constexpr size_t kMonitorAlignment = 8; // Size of a monitor, rounded up to a multiple of alignment. @@ -174,20 +188,47 @@ class MonitorPool { // Chunk size that is referenced in the id. We can collapse this to the actually used storage // in a chunk, i.e., kChunkCapacity * kAlignedMonitorSize, but this will mean proper divisions. static constexpr size_t kChunkSize = kPageSize; - // The number of initial chunks storable in monitor_chunks_. The number is large enough to make - // resizing unlikely, but small enough to not waste too much memory. - static constexpr size_t kInitialChunkStorage = 8U; - - // List of memory chunks. Each chunk is kChunkSize. - Atomic<uintptr_t*> monitor_chunks_; - // Number of chunks stored. + static_assert(IsPowerOfTwo(kChunkSize), "kChunkSize must be power of 2"); + // The number of chunks of storage that can be referenced by the initial chunk list. + // The total number of usable monitor chunks is typically 255 times this number, so it + // should be large enough that we don't run out. We run out of address bits if it's > 512. + // Currently we set it a bit smaller, to save half a page per process. We make it tiny in + // debug builds to catch growth errors. The only value we really expect to tune. + static constexpr size_t kInitialChunkStorage = kIsDebugBuild ? 1U : 256U; + static_assert(IsPowerOfTwo(kInitialChunkStorage), "kInitialChunkStorage must be power of 2"); + // The number of lists, each containing pointers to storage chunks. + static constexpr size_t kMaxChunkLists = 8; // Dictated by 3 bit index. Don't increase above 8. + static_assert(IsPowerOfTwo(kMaxChunkLists), "kMaxChunkLists must be power of 2"); + static constexpr size_t kMaxListSize = kInitialChunkStorage << (kMaxChunkLists - 1); + // We lose 3 bits in monitor id due to 3 bit monitor_chunks_ index, and gain it back from + // the 3 bit alignment constraint on monitors: + static_assert(kMaxListSize * kChunkSize < (1 << LockWord::kMonitorIdSize), + "Monitor id bits don't fit"); + static_assert(IsPowerOfTwo(kMaxListSize), "kMaxListSize must be power of 2"); + + // Array of pointers to lists (again arrays) of pointers to chunks containing monitors. 
+ // Zeroth entry points to a list (array) of kInitialChunkStorage pointers to chunks. + // Each subsequent list is twice as large as the preceding one. + // Monitor Ids are interpreted as follows: + // Top 3 bits (of 28): index into monitor_chunks_. + // Next 16 bits: index into the chunk list, i.e. monitor_chunks_[i]. + // Last 9 bits: offset within chunk, expressed as multiple of kMonitorAlignment. + // If we set kInitialChunkStorage to 512, this would allow us to use roughly 128K chunks of + // monitors, which is 0.5GB of monitors. With this maximum setting, the largest chunk list + // contains 64K entries, and we make full use of the available index space. With a + // kInitialChunkStorage value of 256, this is proportionately reduced to 0.25GB of monitors. + // Updates to monitor_chunks_ are guarded by allocated_monitor_ids_lock_. + // No field in this entire data structure is ever updated once a monitor id whose lookup + // requires it has been made visible to another thread. Thus readers never race with + // updates, in spite of the fact that they acquire no locks. + uintptr_t* monitor_chunks_[kMaxChunkLists]; // uintptr_t is really a Monitor*. + // Highest currently used index in monitor_chunks_. Used for newly allocated chunks. + size_t current_chunk_list_index_ GUARDED_BY(Locks::allocated_monitor_ids_lock_); + // Number of chunk pointers stored in monitor_chunks_[current_chunk_list_index_] so far. size_t num_chunks_ GUARDED_BY(Locks::allocated_monitor_ids_lock_); - // Number of chunks storable. - size_t capacity_ GUARDED_BY(Locks::allocated_monitor_ids_lock_); - - // To avoid race issues when resizing, we keep all the previous arrays. - std::vector<std::unique_ptr<uintptr_t[]>> old_chunk_arrays_ GUARDED_BY(Locks::allocated_monitor_ids_lock_); + // After the initial allocation, this is always equal to + // ChunkListCapacity(current_chunk_list_index_).
+ size_t current_chunk_list_capacity_ GUARDED_BY(Locks::allocated_monitor_ids_lock_); typedef TrackingAllocator<uint8_t, kAllocatorTagMonitorPool> Allocator; Allocator allocator_; diff --git a/runtime/native/dalvik_system_VMRuntime.cc b/runtime/native/dalvik_system_VMRuntime.cc index f355c2a948..5ba8df79ca 100644 --- a/runtime/native/dalvik_system_VMRuntime.cc +++ b/runtime/native/dalvik_system_VMRuntime.cc @@ -212,6 +212,10 @@ static void VMRuntime_registerNativeAllocation(JNIEnv* env, jobject, jint bytes) Runtime::Current()->GetHeap()->RegisterNativeAllocation(env, static_cast<size_t>(bytes)); } +static void VMRuntime_registerSensitiveThread(JNIEnv*, jobject) { + Runtime::Current()->RegisterSensitiveThread(); +} + static void VMRuntime_registerNativeFree(JNIEnv* env, jobject, jint bytes) { if (UNLIKELY(bytes < 0)) { ScopedObjectAccess soa(env); @@ -643,6 +647,7 @@ static JNINativeMethod gMethods[] = { NATIVE_METHOD(VMRuntime, properties, "()[Ljava/lang/String;"), NATIVE_METHOD(VMRuntime, setTargetSdkVersionNative, "(I)V"), NATIVE_METHOD(VMRuntime, registerNativeAllocation, "(I)V"), + NATIVE_METHOD(VMRuntime, registerSensitiveThread, "()V"), NATIVE_METHOD(VMRuntime, registerNativeFree, "(I)V"), NATIVE_METHOD(VMRuntime, requestConcurrentGC, "()V"), NATIVE_METHOD(VMRuntime, requestHeapTrim, "()V"), diff --git a/runtime/native/java_lang_Class.cc b/runtime/native/java_lang_Class.cc index 6b7ca40bee..0624da38c8 100644 --- a/runtime/native/java_lang_Class.cc +++ b/runtime/native/java_lang_Class.cc @@ -238,12 +238,13 @@ static mirror::Field* GetPublicFieldRecursive( DCHECK(name != nullptr); DCHECK(self != nullptr); - StackHandleScope<1> hs(self); + StackHandleScope<2> hs(self); MutableHandle<mirror::Class> h_clazz(hs.NewHandle(clazz)); + Handle<mirror::String> h_name(hs.NewHandle(name)); // We search the current class, its direct interfaces then its superclass. 
while (h_clazz.Get() != nullptr) { - mirror::Field* result = GetDeclaredField(self, h_clazz.Get(), name); + mirror::Field* result = GetDeclaredField(self, h_clazz.Get(), h_name.Get()); if ((result != nullptr) && (result->GetAccessFlags() & kAccPublic)) { return result; } else if (UNLIKELY(self->IsExceptionPending())) { @@ -258,7 +259,7 @@ static mirror::Field* GetPublicFieldRecursive( self->AssertPendingException(); return nullptr; } - result = GetPublicFieldRecursive(self, iface, name); + result = GetPublicFieldRecursive(self, iface, h_name.Get()); if (result != nullptr) { DCHECK(result->GetAccessFlags() & kAccPublic); return result; diff --git a/runtime/oat_file.h b/runtime/oat_file.h index 11a9d76dad..9470624df8 100644 --- a/runtime/oat_file.h +++ b/runtime/oat_file.h @@ -370,6 +370,10 @@ class OatDexFile FINAL { return lookup_table_data_; } + const uint8_t* GetDexFilePointer() const { + return dex_file_pointer_; + } + ~OatDexFile(); private: diff --git a/runtime/parsed_options.cc b/runtime/parsed_options.cc index c8d429158c..b25a1bb90f 100644 --- a/runtime/parsed_options.cc +++ b/runtime/parsed_options.cc @@ -153,7 +153,7 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize .Define("-Xusejit:_") .WithType<bool>() .WithValueMap({{"false", false}, {"true", true}}) - .IntoKey(M::UseJIT) + .IntoKey(M::UseJitCompilation) .Define("-Xjitinitialsize:_") .WithType<MemoryKiB>() .IntoKey(M::JITCodeCacheInitialCapacity) @@ -172,6 +172,9 @@ std::unique_ptr<RuntimeParser> ParsedOptions::MakeParser(bool ignore_unrecognize .Define("-Xjitprithreadweight:_") .WithType<unsigned int>() .IntoKey(M::JITPriorityThreadWeight) + .Define("-Xjittransitionweight:_") + .WithType<unsigned int>() + .IntoKey(M::JITInvokeTransitionWeight) .Define("-Xjitsaveprofilinginfo") .WithValue(true) .IntoKey(M::JITSaveProfilingInfo) @@ -471,6 +474,11 @@ bool ParsedOptions::DoParse(const RuntimeOptions& options, LOG(INFO) << "setting boot class path to " << *args.Get(M::BootClassPath); } + if (args.GetOrDefault(M::UseJitCompilation) && args.GetOrDefault(M::Interpret)) { + Usage("-Xusejit:true and -Xint cannot be specified together"); + Exit(0); + } + // Set a default boot class path if we didn't get an explicit one via command line. if (getenv("BOOTCLASSPATH") != nullptr) { args.SetIfMissing(M::BootClassPath, std::string(getenv("BOOTCLASSPATH"))); diff --git a/runtime/quick/inline_method_analyser.cc b/runtime/quick/inline_method_analyser.cc index c7ccee2125..1dea562b5e 100644 --- a/runtime/quick/inline_method_analyser.cc +++ b/runtime/quick/inline_method_analyser.cc @@ -434,7 +434,7 @@ static_assert(InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT) == bool InlineMethodAnalyser::AnalyseMethodCode(verifier::MethodVerifier* verifier, InlineMethod* result) { DCHECK(verifier != nullptr); - if (!Runtime::Current()->UseJit()) { + if (!Runtime::Current()->UseJitCompilation()) { DCHECK_EQ(verifier->CanLoadClasses(), result != nullptr); } diff --git a/runtime/quick_exception_handler.cc b/runtime/quick_exception_handler.cc index a785ecba3b..a3e1f0020a 100644 --- a/runtime/quick_exception_handler.cc +++ b/runtime/quick_exception_handler.cc @@ -509,7 +509,7 @@ void QuickExceptionHandler::DeoptimizeSingleFrame() { // Compiled code made an explicit deoptimization. 
ArtMethod* deopt_method = visitor.GetSingleFrameDeoptMethod(); DCHECK(deopt_method != nullptr); - if (Runtime::Current()->UseJit()) { + if (Runtime::Current()->UseJitCompilation()) { Runtime::Current()->GetJit()->GetCodeCache()->InvalidateCompiledCodeFor( deopt_method, visitor.GetSingleFrameDeoptQuickMethodHeader()); } else { @@ -611,7 +611,7 @@ void QuickExceptionHandler::DoLongJump(bool smash_caller_saves) { // Prints out methods with their type of frame. class DumpFramesWithTypeStackVisitor FINAL : public StackVisitor { public: - DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false) + explicit DumpFramesWithTypeStackVisitor(Thread* self, bool show_details = false) SHARED_REQUIRES(Locks::mutator_lock_) : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), show_details_(show_details) {} diff --git a/runtime/runtime.cc b/runtime/runtime.cc index a4d31ef123..bb19cbd255 100644 --- a/runtime/runtime.cc +++ b/runtime/runtime.cc @@ -558,15 +558,21 @@ bool Runtime::Start() { started_ = true; - if (jit_options_->UseJIT()) { + // Create the JIT if we have to use JIT compilation or to save profiling info. + // TODO(calin): We use the JIT class as a proxy for JIT compilation and for + // recording profiles. Maybe we should consider changing the name to make it clearer + // that it's not only about compiling. b/28295073. + if (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) { std::string error_msg; if (!IsZygote()) { // If we are the zygote then we need to wait until after forking to create the code cache // due to SELinux restrictions on r/w/x memory regions. CreateJit(); - } else if (!jit::Jit::LoadCompilerLibrary(&error_msg)) { - // Try to load compiler pre zygote to reduce PSS. b/27744947 - LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg; + } else if (jit_options_->UseJitCompilation()) { + if (!jit::Jit::LoadCompilerLibrary(&error_msg)) { + // Try to load compiler pre zygote to reduce PSS. b/27744947 + LOG(WARNING) << "Failed to load JIT compiler with error " << error_msg; + } } } @@ -713,7 +719,11 @@ void Runtime::InitNonZygoteOrPostFork( // before fork aren't attributed to an app. heap_->ResetGcPerformanceInfo(); - if (!is_system_server && !safe_mode_ && jit_options_->UseJIT() && jit_.get() == nullptr) { + + if (!is_system_server && + !safe_mode_ && + (jit_options_->UseJitCompilation() || jit_options_->GetSaveProfilingInfo()) && + jit_.get() == nullptr) { // Note that when running ART standalone (not zygote, nor zygote fork), // the jit may have already been created. CreateJit(); } @@ -1016,7 +1026,8 @@ bool Runtime::Init(RuntimeArgumentMap&& runtime_options_in) { // this case. // If runtime_options doesn't have UseJIT set to true then CreateFromRuntimeArguments returns // null and we don't create the jit. - jit_options_->SetUseJIT(false); + jit_options_->SetUseJitCompilation(false); + jit_options_->SetSaveProfilingInfo(false); } // Allocate a global table of boxed lambda objects <-> closures. @@ -1613,18 +1624,19 @@ void Runtime::VisitImageRoots(RootVisitor* visitor) { } } -static ImtConflictTable::Entry empty_entry = { nullptr, nullptr }; - ArtMethod* Runtime::CreateImtConflictMethod(LinearAlloc* linear_alloc) { - auto* method = Runtime::Current()->GetClassLinker()->CreateRuntimeMethod(linear_alloc); + ClassLinker* const class_linker = GetClassLinker(); + ArtMethod* method = class_linker->CreateRuntimeMethod(linear_alloc); // When compiling, the code pointer will get set later when the image is loaded.
+ const size_t pointer_size = GetInstructionSetPointerSize(instruction_set_); if (IsAotCompiler()) { - size_t pointer_size = GetInstructionSetPointerSize(instruction_set_); method->SetEntryPointFromQuickCompiledCodePtrSize(nullptr, pointer_size); } else { method->SetEntryPointFromQuickCompiledCode(GetQuickImtConflictStub()); - method->SetImtConflictTable(reinterpret_cast<ImtConflictTable*>(&empty_entry)); } + // Create empty conflict table. + method->SetImtConflictTable(class_linker->CreateImtConflictTable(/*count*/0u, linear_alloc), + pointer_size); return method; } @@ -1632,9 +1644,6 @@ void Runtime::SetImtConflictMethod(ArtMethod* method) { CHECK(method != nullptr); CHECK(method->IsRuntimeMethod()); imt_conflict_method_ = method; - if (!IsAotCompiler()) { - method->SetImtConflictTable(reinterpret_cast<ImtConflictTable*>(&empty_entry)); - } } ArtMethod* Runtime::CreateResolutionMethod() { @@ -1915,9 +1924,8 @@ void Runtime::AddCurrentRuntimeFeaturesAsDex2OatArguments(std::vector<std::strin void Runtime::CreateJit() { CHECK(!IsAotCompiler()); - if (GetInstrumentation()->IsForcedInterpretOnly()) { - // Don't create JIT if forced interpret only. - return; + if (kIsDebugBuild && GetInstrumentation()->IsForcedInterpretOnly()) { + DCHECK(!jit_options_->UseJitCompilation()); } std::string error_msg; jit_.reset(jit::Jit::Create(jit_options_.get(), &error_msg)); @@ -1944,8 +1952,20 @@ void Runtime::SetImtUnimplementedMethod(ArtMethod* method) { CHECK(method != nullptr); CHECK(method->IsRuntimeMethod()); imt_unimplemented_method_ = method; - if (!IsAotCompiler()) { - method->SetImtConflictTable(reinterpret_cast<ImtConflictTable*>(&empty_entry)); +} + +void Runtime::FixupConflictTables() { + // We can only do this after the class linker is created. + const size_t pointer_size = GetClassLinker()->GetImagePointerSize(); + if (imt_unimplemented_method_->GetImtConflictTable(pointer_size) == nullptr) { + imt_unimplemented_method_->SetImtConflictTable( + ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size), + pointer_size); + } + if (imt_conflict_method_->GetImtConflictTable(pointer_size) == nullptr) { + imt_conflict_method_->SetImtConflictTable( + ClassLinker::CreateImtConflictTable(/*count*/0u, GetLinearAlloc(), pointer_size), + pointer_size); } } @@ -1981,4 +2001,18 @@ void Runtime::UpdateProcessState(ProcessState process_state) { GetHeap()->UpdateProcessState(old_process_state, process_state); } +void Runtime::RegisterSensitiveThread() const { + Thread::SetJitSensitiveThread(); +} + +// Returns true if JIT compilation is enabled. GetJit() will not be null in this case. +bool Runtime::UseJitCompilation() const { + return (jit_ != nullptr) && jit_->UseJitCompilation(); +} + +// Returns true if profile saving is enabled. GetJit() will not be null in this case. +bool Runtime::SaveProfileInfo() const { + return (jit_ != nullptr) && jit_->SaveProfilingInfo(); +} + } // namespace art diff --git a/runtime/runtime.h b/runtime/runtime.h index ae25dd1c65..1394462fd1 100644 --- a/runtime/runtime.h +++ b/runtime/runtime.h @@ -127,7 +127,7 @@ class Runtime { // IsAotCompiler for compilers that don't have a running runtime. Only dex2oat currently. bool IsAotCompiler() const { return !UseJitCompilation() && IsCompiler(); } // IsCompiler is any runtime which has a running compiler, either dex2oat or JIT.
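The predicate split above is the heart of this change: a JIT object may now exist purely to record profiles. A hedged, self-contained sketch of the resulting relationships (simplified stand-in types, not ART's real classes):

    #include <cassert>

    // Simplified stand-ins for art::jit::Jit and art::Runtime.
    struct Jit {
      bool use_jit_compilation;  // compile hot methods
      bool save_profiling_info;  // record profiles for background compilation
    };

    struct Runtime {
      Jit* jit = nullptr;        // created if either feature is requested
      bool is_compiler = false;

      // Before this change, UseJit() meant only "jit_ != nullptr", which becomes
      // ambiguous once a JIT can exist purely for profile saving.
      bool UseJitCompilation() const { return jit != nullptr && jit->use_jit_compilation; }
      bool SaveProfileInfo() const { return jit != nullptr && jit->save_profiling_info; }
      bool IsAotCompiler() const { return !UseJitCompilation() && is_compiler; }
    };

    int main() {
      Jit profile_only{/*use_jit_compilation=*/false, /*save_profiling_info=*/true};
      Runtime rt;
      rt.jit = &profile_only;
      // A profile-only JIT must not count as a compiling JIT:
      assert(!rt.UseJitCompilation());
      assert(rt.SaveProfileInfo());
      return 0;
    }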
@@ -383,6 +383,7 @@ class Runtime { return imt_conflict_method_ != nullptr; } + void FixupConflictTables(); void SetImtConflictMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); void SetImtUnimplementedMethod(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_); @@ -451,9 +452,11 @@ class Runtime { jit::Jit* GetJit() { return jit_.get(); } - bool UseJit() const { - return jit_.get() != nullptr; - } + + // Returns true if JIT compilation is enabled. GetJit() will not be null in this case. + bool UseJitCompilation() const; + // Returns true if profile saving is enabled. GetJit() will not be null in this case. + bool SaveProfileInfo() const; void PreZygoteFork(); bool InitZygote(); @@ -635,6 +638,8 @@ class Runtime { return process_state_ == kProcessStateJankPerceptible; } + void RegisterSensitiveThread() const; + void SetZygoteNoThreadSection(bool val) { zygote_no_threads_ = val; } diff --git a/runtime/runtime_options.def b/runtime/runtime_options.def index 6433c3352f..2a96703109 100644 --- a/runtime/runtime_options.def +++ b/runtime/runtime_options.def @@ -66,12 +66,13 @@ RUNTIME_OPTIONS_KEY (Unit, IgnoreMaxFootprint) RUNTIME_OPTIONS_KEY (Unit, LowMemoryMode) RUNTIME_OPTIONS_KEY (bool, UseTLAB, (kUseTlab || kUseReadBarrier)) RUNTIME_OPTIONS_KEY (bool, EnableHSpaceCompactForOOM, true) -RUNTIME_OPTIONS_KEY (bool, UseJIT, false) +RUNTIME_OPTIONS_KEY (bool, UseJitCompilation, false) RUNTIME_OPTIONS_KEY (bool, DumpNativeStackOnSigQuit, true) RUNTIME_OPTIONS_KEY (unsigned int, JITCompileThreshold, jit::Jit::kDefaultCompileThreshold) RUNTIME_OPTIONS_KEY (unsigned int, JITWarmupThreshold) RUNTIME_OPTIONS_KEY (unsigned int, JITOsrThreshold) RUNTIME_OPTIONS_KEY (unsigned int, JITPriorityThreadWeight) +RUNTIME_OPTIONS_KEY (unsigned int, JITInvokeTransitionWeight) RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheInitialCapacity, jit::JitCodeCache::kInitialCapacity) RUNTIME_OPTIONS_KEY (MemoryKiB, JITCodeCacheMaxCapacity, jit::JitCodeCache::kMaxCapacity) RUNTIME_OPTIONS_KEY (bool, JITSaveProfilingInfo, false) diff --git a/runtime/stack.cc b/runtime/stack.cc index 56ef5aaa90..a5ca527aa2 100644 --- a/runtime/stack.cc +++ b/runtime/stack.cc @@ -637,8 +637,8 @@ static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc) // If we are the JIT then we may have just compiled the method after the // IsQuickToInterpreterBridge check.
- jit::Jit* const jit = Runtime::Current()->GetJit(); - if (jit != nullptr && jit->GetCodeCache()->ContainsPc(code)) { + Runtime* runtime = Runtime::Current(); + if (runtime->UseJitCompilation() && runtime->GetJit()->GetCodeCache()->ContainsPc(code)) { return; } @@ -678,8 +678,10 @@ void StackVisitor::SanityCheckFrame() const { if (space->IsImageSpace()) { auto* image_space = space->AsImageSpace(); const auto& header = image_space->GetImageHeader(); - const auto* methods = &header.GetMethodsSection(); - if (methods->Contains(reinterpret_cast<const uint8_t*>(method) - image_space->Begin())) { + const ImageSection& methods = header.GetMethodsSection(); + const ImageSection& runtime_methods = header.GetRuntimeMethodsSection(); + const size_t offset = reinterpret_cast<const uint8_t*>(method) - image_space->Begin(); + if (methods.Contains(offset) || runtime_methods.Contains(offset)) { in_image = true; break; } @@ -948,7 +950,7 @@ int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item, } } -void LockCountData::AddMonitorInternal(Thread* self, mirror::Object* obj) { +void LockCountData::AddMonitor(Thread* self, mirror::Object* obj) { if (obj == nullptr) { return; } @@ -965,7 +967,7 @@ void LockCountData::AddMonitorInternal(Thread* self, mirror::Object* obj) { monitors_->push_back(obj); } -void LockCountData::RemoveMonitorInternal(Thread* self, const mirror::Object* obj) { +void LockCountData::RemoveMonitorOrThrow(Thread* self, const mirror::Object* obj) { if (obj == nullptr) { return; } @@ -998,7 +1000,7 @@ void MonitorExitHelper(Thread* self, mirror::Object* obj) NO_THREAD_SAFETY_ANALY obj->MonitorExit(self); } -bool LockCountData::CheckAllMonitorsReleasedInternal(Thread* self) { +bool LockCountData::CheckAllMonitorsReleasedOrThrow(Thread* self) { DCHECK(self != nullptr); if (monitors_ != nullptr) { if (!monitors_->empty()) { diff --git a/runtime/stack.h b/runtime/stack.h index 7301184a9e..e77ab4647e 100644 --- a/runtime/stack.h +++ b/runtime/stack.h @@ -80,39 +80,18 @@ class LockCountData { public: // Add the given object to the list of monitors, that is, objects that have been locked. This // will not throw (but be skipped if there is an exception pending on entry). - template <bool kLockCounting> - void AddMonitor(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { - DCHECK(self != nullptr); - if (!kLockCounting) { - return; - } - AddMonitorInternal(self, obj); - } + void AddMonitor(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); // Try to remove the given object from the monitor list, indicating an unlock operation. // This will throw an IllegalMonitorStateException (clearing any already pending exception), in // case that there wasn't a lock recorded for the object. - template <bool kLockCounting> void RemoveMonitorOrThrow(Thread* self, - const mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_) { - DCHECK(self != nullptr); - if (!kLockCounting) { - return; - } - RemoveMonitorInternal(self, obj); - } + const mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); // Check whether all acquired monitors have been released. This will potentially throw an // IllegalMonitorStateException, clearing any already pending exception. Returns true if the // check shows that everything is OK wrt/ lock counting, false otherwise. 
- template <bool kLockCounting> - bool CheckAllMonitorsReleasedOrThrow(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_) { - DCHECK(self != nullptr); - if (!kLockCounting) { - return true; - } - return CheckAllMonitorsReleasedInternal(self); - } + bool CheckAllMonitorsReleasedOrThrow(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); template <typename T, typename... Args> void VisitMonitors(T visitor, Args&&... args) SHARED_REQUIRES(Locks::mutator_lock_) { @@ -125,12 +104,6 @@ class LockCountData { } private: - // Internal implementations. - void AddMonitorInternal(Thread* self, mirror::Object* obj) SHARED_REQUIRES(Locks::mutator_lock_); - void RemoveMonitorInternal(Thread* self, const mirror::Object* obj) - SHARED_REQUIRES(Locks::mutator_lock_); - bool CheckAllMonitorsReleasedInternal(Thread* self) SHARED_REQUIRES(Locks::mutator_lock_); - // Stores references to the locked-on objects. As noted, this should be visited during thread // marking. std::unique_ptr<std::vector<mirror::Object*>> monitors_; diff --git a/runtime/thread.cc b/runtime/thread.cc index 7922b60962..fb248282be 100644 --- a/runtime/thread.cc +++ b/runtime/thread.cc @@ -89,6 +89,7 @@ pthread_key_t Thread::pthread_key_self_; ConditionVariable* Thread::resume_cond_ = nullptr; const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA); bool (*Thread::is_sensitive_thread_hook_)() = nullptr; +Thread* Thread::jit_sensitive_thread_ = nullptr; static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild; @@ -739,7 +740,7 @@ Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_g { MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_); if (runtime->IsShuttingDownLocked()) { - LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name; + LOG(WARNING) << "Thread attaching while runtime is shutting down: " << thread_name; return nullptr; } else { Runtime::Current()->StartThreadBirth(); @@ -3010,7 +3011,6 @@ size_t Thread::NumberOfHeldMutexes() const { return count; } - void Thread::DeoptimizeWithDeoptimizationException(JValue* result) { DCHECK_EQ(GetException(), Thread::GetDeoptimizationException()); ClearException(); @@ -3031,4 +3031,11 @@ void Thread::DeoptimizeWithDeoptimizationException(JValue* result) { interpreter::EnterInterpreterFromDeoptimize(this, shadow_frame, from_code, result); } +void Thread::SetException(mirror::Throwable* new_exception) { + CHECK(new_exception != nullptr); + // TODO: DCHECK(!IsExceptionPending()); + tlsPtr_.exception = new_exception; + // LOG(ERROR) << new_exception->Dump(); +} + } // namespace art diff --git a/runtime/thread.h b/runtime/thread.h index ed42e462ae..582a0cdbd6 100644 --- a/runtime/thread.h +++ b/runtime/thread.h @@ -363,12 +363,7 @@ class Thread { void AssertNoPendingException() const; void AssertNoPendingExceptionForNewException(const char* msg) const; - void SetException(mirror::Throwable* new_exception) - SHARED_REQUIRES(Locks::mutator_lock_) { - CHECK(new_exception != nullptr); - // TODO: DCHECK(!IsExceptionPending()); - tlsPtr_.exception = new_exception; - } + void SetException(mirror::Throwable* new_exception) SHARED_REQUIRES(Locks::mutator_lock_); void ClearException() SHARED_REQUIRES(Locks::mutator_lock_) { tlsPtr_.exception = nullptr; @@ -1098,6 +1093,12 @@ class Thread { return debug_disallow_read_barrier_; } + // Returns true if the current thread is the jit sensitive thread. 
+ bool IsJitSensitiveThread() const { + return this == jit_sensitive_thread_; + } + + // Returns true if StrictMode events are traced for the current thread. static bool IsSensitiveThread() { if (is_sensitive_thread_hook_ != nullptr) { return (*is_sensitive_thread_hook_)(); @@ -1180,6 +1181,16 @@ class Thread { ALWAYS_INLINE void PassActiveSuspendBarriers() REQUIRES(!Locks::thread_suspend_count_lock_, !Roles::uninterruptible_); + // Registers the current thread as the jit sensitive thread. Should be called just once. + static void SetJitSensitiveThread() { + if (jit_sensitive_thread_ == nullptr) { + jit_sensitive_thread_ = Thread::Current(); + } else { + LOG(WARNING) << "Attempt to set the sensitive thread twice. Tid:" + << Thread::Current()->GetTid(); + } + } + static void SetSensitiveThreadHook(bool (*is_sensitive_thread_hook)()) { is_sensitive_thread_hook_ = is_sensitive_thread_hook; } @@ -1229,6 +1240,8 @@ class Thread { // Hook passed by framework which returns true // when StrictMode events are traced for the current thread. static bool (*is_sensitive_thread_hook_)(); + // Stores the jit sensitive thread (which for now is the UI thread). + static Thread* jit_sensitive_thread_; /***********************************************************************************************/ // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for diff --git a/runtime/verifier/method_verifier.cc b/runtime/verifier/method_verifier.cc index 8802e62435..d05ae42b0e 100644 --- a/runtime/verifier/method_verifier.cc +++ b/runtime/verifier/method_verifier.cc @@ -401,8 +401,13 @@ MethodVerifier::FailureData MethodVerifier::VerifyMethod(Thread* self, method->SetAccessFlags(method->GetAccessFlags() | kAccCompileDontBother); } } - if (method != nullptr && verifier.HasInstructionThatWillThrow()) { - method->SetAccessFlags(method->GetAccessFlags() | kAccCompileDontBother); + if (method != nullptr) { + if (verifier.HasInstructionThatWillThrow()) { + method->SetAccessFlags(method->GetAccessFlags() | kAccCompileDontBother); + } + if ((verifier.encountered_failure_types_ & VerifyError::VERIFY_ERROR_LOCKING) != 0) { + method->SetAccessFlags(method->GetAccessFlags() | kAccMustCountLocks); + } } } else { // Bad method data. diff --git a/test/141-class-unload/src/Main.java b/test/141-class-unload/src/Main.java index 15683b0b1e..17a6049dbf 100644 --- a/test/141-class-unload/src/Main.java +++ b/test/141-class-unload/src/Main.java @@ -23,6 +23,7 @@ import java.lang.reflect.Method; public class Main { static final String DEX_FILE = System.getenv("DEX_LOCATION") + "/141-class-unload-ex.jar"; + static final String LIBRARY_SEARCH_PATH = System.getProperty("java.library.path"); static String nativeLibraryName; public static void main(String[] args) throws Exception { @@ -32,7 +33,7 @@ public class Main { throw new AssertionError("Couldn't find path class loader class"); } Constructor constructor = - pathClassLoader.getDeclaredConstructor(String.class, ClassLoader.class); + pathClassLoader.getDeclaredConstructor(String.class, String.class, ClassLoader.class); try { testUnloadClass(constructor); testUnloadLoader(constructor); @@ -49,7 +50,7 @@ public class Main { // Test that the oat files are unloaded. 
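The 141-class-unload changes around here switch the test to PathClassLoader's three-argument constructor so a native library search path can be passed alongside the dex path. The same reflective pattern, extracted into a self-contained helper (the helper class is invented; the constructor signature is the real dalvik.system.PathClassLoader one, so this only runs on Android):

```java
import java.lang.reflect.Constructor;

// Illustrative helper; only the PathClassLoader constructor lookup comes from the test.
final class LoaderFactory {
    static ClassLoader newPathClassLoader(String dexFile, String librarySearchPath)
            throws Exception {
        Class<?> pathClassLoader = Class.forName("dalvik.system.PathClassLoader");
        // (String dexPath, String librarySearchPath, ClassLoader parent)
        Constructor<?> ctor = pathClassLoader.getDeclaredConstructor(
                String.class, String.class, ClassLoader.class);
        return (ClassLoader) ctor.newInstance(
                dexFile, librarySearchPath, ClassLoader.getSystemClassLoader());
    }
}
```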
testOatFilesUnloaded(getPid()); } catch (Exception e) { - System.out.println(e); + e.printStackTrace(); } } @@ -118,7 +119,7 @@ public class Main { private static void testNoUnloadInvoke(Constructor constructor) throws Exception { WeakReference<ClassLoader> loader = new WeakReference((ClassLoader) constructor.newInstance( - DEX_FILE, ClassLoader.getSystemClassLoader())); + DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader())); WeakReference<Class> intHolder = new WeakReference(loader.get().loadClass("IntHolder")); intHolder.get().getDeclaredMethod("runGC").invoke(intHolder.get()); boolean isNull = loader.get() == null; @@ -128,7 +129,7 @@ public class Main { private static void testNoUnloadInstance(Constructor constructor) throws Exception { WeakReference<ClassLoader> loader = new WeakReference((ClassLoader) constructor.newInstance( - DEX_FILE, ClassLoader.getSystemClassLoader())); + DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader())); WeakReference<Class> intHolder = new WeakReference(loader.get().loadClass("IntHolder")); Object o = intHolder.get().newInstance(); Runtime.getRuntime().gc(); @@ -138,7 +139,7 @@ public class Main { private static WeakReference<Class> setUpUnloadClass(Constructor constructor) throws Exception { ClassLoader loader = (ClassLoader) constructor.newInstance( - DEX_FILE, ClassLoader.getSystemClassLoader()); + DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader()); Class intHolder = loader.loadClass("IntHolder"); Method getValue = intHolder.getDeclaredMethod("getValue"); Method setValue = intHolder.getDeclaredMethod("setValue", Integer.TYPE); @@ -155,7 +156,7 @@ public class Main { boolean waitForCompilation) throws Exception { ClassLoader loader = (ClassLoader) constructor.newInstance( - DEX_FILE, ClassLoader.getSystemClassLoader()); + DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader()); Class intHolder = loader.loadClass("IntHolder"); Method setValue = intHolder.getDeclaredMethod("setValue", Integer.TYPE); setValue.invoke(intHolder, 2); @@ -177,7 +178,7 @@ public class Main { private static WeakReference<ClassLoader> setUpLoadLibrary(Constructor constructor) throws Exception { ClassLoader loader = (ClassLoader) constructor.newInstance( - DEX_FILE, ClassLoader.getSystemClassLoader()); + DEX_FILE, LIBRARY_SEARCH_PATH, ClassLoader.getSystemClassLoader()); Class intHolder = loader.loadClass("IntHolder"); Method loadLibrary = intHolder.getDeclaredMethod("loadLibrary", String.class); loadLibrary.invoke(intHolder, nativeLibraryName); diff --git a/test/551-checker-shifter-operand/src/Main.java b/test/551-checker-shifter-operand/src/Main.java index edb8a68b47..a4561b83da 100644 --- a/test/551-checker-shifter-operand/src/Main.java +++ b/test/551-checker-shifter-operand/src/Main.java @@ -500,9 +500,9 @@ public class Main { assertIntEquals(a + $noinline$IntShl(b, 16), a + (b << 16)); assertIntEquals(a + $noinline$IntShl(b, 30), a + (b << 30)); assertIntEquals(a + $noinline$IntShl(b, 31), a + (b << 31)); - assertIntEquals(a + $noinline$IntShl(b, 32), a + (b << 32)); - assertIntEquals(a + $noinline$IntShl(b, 62), a + (b << 62)); - assertIntEquals(a + $noinline$IntShl(b, 63), a + (b << 63)); + assertIntEquals(a + $noinline$IntShl(b, 32), a + (b << $opt$inline$IntConstant32())); + assertIntEquals(a + $noinline$IntShl(b, 62), a + (b << $opt$inline$IntConstant62())); + assertIntEquals(a + $noinline$IntShl(b, 63), a + (b << $opt$inline$IntConstant63())); assertIntEquals(a - $noinline$IntShr(b, 1), a - (b >> 1)); 
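The 551-checker-shifter-operand hunks here replace literal shift distances of 32, 62, and 63 with $opt$inline$IntConstant helpers. The motivation is the Java shift semantics themselves: for an int shift, only the low five bits of the distance are used, so "b << 32" is constant-folded to "b" before the backend ever sees a shift. A stand-alone demonstration of the masking:

```java
// Demonstrates JLS §15.19: int shift distances are taken modulo 32.
public class ShiftMask {
    public static void main(String[] args) {
        int b = 0b1010;
        System.out.println((b << 32) == b);           // true: 32 & 0x1f == 0
        System.out.println((b << 33) == (b << 1));    // true: 33 & 0x1f == 1
        System.out.println((b >>> 63) == (b >>> 31)); // true: 63 & 0x1f == 31
    }
}
```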
assertIntEquals(a - $noinline$IntShr(b, 6), a - (b >> 6)); @@ -513,9 +513,9 @@ public class Main { assertIntEquals(a - $noinline$IntShr(b, 16), a - (b >> 16)); assertIntEquals(a - $noinline$IntShr(b, 30), a - (b >> 30)); assertIntEquals(a - $noinline$IntShr(b, 31), a - (b >> 31)); - assertIntEquals(a - $noinline$IntShr(b, 32), a - (b >> 32)); - assertIntEquals(a - $noinline$IntShr(b, 62), a - (b >> 62)); - assertIntEquals(a - $noinline$IntShr(b, 63), a - (b >> 63)); + assertIntEquals(a - $noinline$IntShr(b, 32), a - (b >> $opt$inline$IntConstant32())); + assertIntEquals(a - $noinline$IntShr(b, 62), a - (b >> $opt$inline$IntConstant62())); + assertIntEquals(a - $noinline$IntShr(b, 63), a - (b >> $opt$inline$IntConstant63())); assertIntEquals(a ^ $noinline$IntUshr(b, 1), a ^ (b >>> 1)); assertIntEquals(a ^ $noinline$IntUshr(b, 6), a ^ (b >>> 6)); @@ -526,11 +526,17 @@ public class Main { assertIntEquals(a ^ $noinline$IntUshr(b, 16), a ^ (b >>> 16)); assertIntEquals(a ^ $noinline$IntUshr(b, 30), a ^ (b >>> 30)); assertIntEquals(a ^ $noinline$IntUshr(b, 31), a ^ (b >>> 31)); - assertIntEquals(a ^ $noinline$IntUshr(b, 32), a ^ (b >>> 32)); - assertIntEquals(a ^ $noinline$IntUshr(b, 62), a ^ (b >>> 62)); - assertIntEquals(a ^ $noinline$IntUshr(b, 63), a ^ (b >>> 63)); + assertIntEquals(a ^ $noinline$IntUshr(b, 32), a ^ (b >>> $opt$inline$IntConstant32())); + assertIntEquals(a ^ $noinline$IntUshr(b, 62), a ^ (b >>> $opt$inline$IntConstant62())); + assertIntEquals(a ^ $noinline$IntUshr(b, 63), a ^ (b >>> $opt$inline$IntConstant63())); } + // Hiding constants outside the range [0, 32) used for int shifts from Jack. + // (Jack extracts only the low 5 bits.) + public static int $opt$inline$IntConstant32() { return 32; } + public static int $opt$inline$IntConstant62() { return 62; } + public static int $opt$inline$IntConstant63() { return 63; } + static long $noinline$LongShl(long b, long c) { if (doThrow) throw new Error(); diff --git a/test/557-checker-instruction-simplifier-ror/src/Main.java b/test/557-checker-instruction-simplifier-ror/src/Main.java index 310611bba9..6d8b74d1ec 100644 --- a/test/557-checker-instruction-simplifier-ror/src/Main.java +++ b/test/557-checker-instruction-simplifier-ror/src/Main.java @@ -175,28 +175,32 @@ public class Main { // (i >>> #distance) | (i << #-distance) - /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier (before) + /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier_after_bce (before) /// CHECK: <<ArgValue:i\d+>> ParameterValue /// CHECK: <<Const2:i\d+>> IntConstant 2 - /// CHECK: <<ConstNeg2:i\d+>> IntConstant {{-2|30}} + /// CHECK: <<ConstNeg2:i\d+>> IntConstant -2 /// CHECK-DAG: <<UShr:i\d+>> UShr [<<ArgValue>>,<<Const2>>] /// CHECK-DAG: <<Shl:i\d+>> Shl [<<ArgValue>>,<<ConstNeg2>>] /// CHECK: <<Or:i\d+>> Or [<<UShr>>,<<Shl>>] /// CHECK: Return [<<Or>>] - /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier (after) + /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier_after_bce (after) /// CHECK: <<ArgValue:i\d+>> ParameterValue /// CHECK: <<Const2:i\d+>> IntConstant 2 /// CHECK: <<Ror:i\d+>> Ror [<<ArgValue>>,<<Const2>>] /// CHECK: Return [<<Ror>>] - /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier (after) + /// CHECK-START: int Main.ror_int_constant_c_negc(int) instruction_simplifier_after_bce (after) /// CHECK-NOT: UShr /// CHECK-NOT: Shl public static int ror_int_constant_c_negc(int value) { - return (value >>> 2) | 
(value << -2); + return (value >>> 2) | (value << $opt$inline$IntConstantM2()); } + // Hiding constants outside the range [0, 32) used for int shifts from Jack. + // (Jack extracts only the low 5 bits.) + public static int $opt$inline$IntConstantM2() { return -2; } + // (j >>> #distance) | (j << #-distance) /// CHECK-START: long Main.ror_long_constant_c_negc(long) instruction_simplifier (before) diff --git a/test/566-polymorphic-inlining/polymorphic_inline.cc b/test/566-polymorphic-inlining/polymorphic_inline.cc index 167a5757f9..7b2c6cbcd5 100644 --- a/test/566-polymorphic-inlining/polymorphic_inline.cc +++ b/test/566-polymorphic-inlining/polymorphic_inline.cc @@ -60,6 +60,7 @@ extern "C" JNIEXPORT void JNICALL Java_Main_ensureJittedAndPolymorphicInline(JNI do_checks(cls, "testInvokeVirtual"); do_checks(cls, "testInvokeInterface"); + do_checks(cls, "$noinline$testInlineToSameTarget"); } } // namespace art diff --git a/test/566-polymorphic-inlining/src/Main.java b/test/566-polymorphic-inlining/src/Main.java index 7283e86227..286f0d996e 100644 --- a/test/566-polymorphic-inlining/src/Main.java +++ b/test/566-polymorphic-inlining/src/Main.java @@ -25,6 +25,12 @@ public class Main implements Itf { } } + public static void assertEquals(int expected, int actual) { + if (expected != actual) { + throw new Error("Expected " + expected + ", got " + actual); + } + } + public static void main(String[] args) throws Exception { System.loadLibrary(args[0]); Main[] mains = new Main[3]; @@ -41,6 +47,8 @@ public class Main implements Itf { testInvokeVirtual(mains[1]); testInvokeInterface(itfs[0]); testInvokeInterface(itfs[1]); + $noinline$testInlineToSameTarget(mains[0]); + $noinline$testInlineToSameTarget(mains[1]); } ensureJittedAndPolymorphicInline(); @@ -56,6 +64,10 @@ public class Main implements Itf { // This will trigger a deoptimization of the compiled code. assertEquals(OtherSubclass.class, testInvokeVirtual(mains[2])); assertEquals(OtherSubclass.class, testInvokeInterface(itfs[2])); + + // Run this once to make sure we execute the JITted code. + $noinline$testInlineToSameTarget(mains[0]); + assertEquals(20001, counter); } public Class sameInvokeVirtual() { @@ -76,9 +88,20 @@ public class Main implements Itf { return m.sameInvokeVirtual(); } + public static void $noinline$testInlineToSameTarget(Main m) { + if (doThrow) throw new Error(""); + m.increment(); + } + public Object field = new Object(); public static native void ensureJittedAndPolymorphicInline(); + + public void increment() { + counter++; + } + public static int counter = 0; + public static boolean doThrow = false; } class Subclass extends Main { diff --git a/test/570-checker-osr/osr.cc b/test/570-checker-osr/osr.cc index 2a5b2c954d..bd8f0a9520 100644 --- a/test/570-checker-osr/osr.cc +++ b/test/570-checker-osr/osr.cc @@ -75,7 +75,7 @@ extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInOsrCode(JNIEnv* env, extern "C" JNIEXPORT jboolean JNICALL Java_Main_isInInterpreter(JNIEnv* env, jclass, jstring method_name) { - if (!Runtime::Current()->UseJit()) { + if (!Runtime::Current()->UseJitCompilation()) { // The return value is irrelevant if we're not using JIT. 
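The new test methods here and in the following hunks lean on the suite's $noinline$ naming plus an always-false throw guard, which keeps a method from being inlined without otherwise changing its behavior. Reduced to its core (names invented, behavior as used in these tests):

```java
// Sketch of the $noinline$/doThrow idiom used by the ART run-tests.
public class NoInlineTrick {
    public static boolean doThrow = false;  // never set, so the guard never fires

    public static int $noinline$add(int a, int b) {
        if (doThrow) throw new Error();     // defeats inlining of this method
        return a + b;
    }

    public static void main(String[] args) {
        System.out.println($noinline$add(2, 3));  // prints 5
    }
}
```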
return false; } @@ -111,7 +111,7 @@ class ProfilingInfoVisitor : public StackVisitor { extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasProfilingInfo(JNIEnv* env, jclass, jstring method_name) { - if (!Runtime::Current()->UseJit()) { + if (!Runtime::Current()->UseJitCompilation()) { return; } ScopedUtfChars chars(env, method_name); @@ -151,7 +151,7 @@ class OsrCheckVisitor : public StackVisitor { extern "C" JNIEXPORT void JNICALL Java_Main_ensureHasOsrCode(JNIEnv* env, jclass, jstring method_name) { - if (!Runtime::Current()->UseJit()) { + if (!Runtime::Current()->UseJitCompilation()) { return; } ScopedUtfChars chars(env, method_name); diff --git a/test/570-checker-osr/src/Main.java b/test/570-checker-osr/src/Main.java index 200b54a445..15c232d6a8 100644 --- a/test/570-checker-osr/src/Main.java +++ b/test/570-checker-osr/src/Main.java @@ -61,6 +61,18 @@ public class Main { throw new Error("Unexpected return value"); } + $noinline$inlineCache2(new Main(), /* isSecondInvocation */ false); + if ($noinline$inlineCache2(new SubMain(), /* isSecondInvocation */ true) != SubMain.class) { + throw new Error("Unexpected return value"); + } + + // Test polymorphic inline cache to the same target (inlineCache3). + $noinline$inlineCache3(new Main(), /* isSecondInvocation */ false); + $noinline$inlineCache3(new SubMain(), /* isSecondInvocation */ false); + if ($noinline$inlineCache3(new SubMain(), /* isSecondInvocation */ true) != null) { + throw new Error("Unexpected return value"); + } + $noinline$stackOverflow(new Main(), /* isSecondInvocation */ false); $noinline$stackOverflow(new SubMain(), /* isSecondInvocation */ true); @@ -147,10 +159,76 @@ public class Main { return other.returnClass(); } + public static Class $noinline$inlineCache2(Main m, boolean isSecondInvocation) { + // If we are running in non-JIT mode, or were unlucky enough to get this method + // already JITted, just return the expected value. + if (!isInInterpreter("$noinline$inlineCache2")) { + return SubMain.class; + } + + ensureHasProfilingInfo("$noinline$inlineCache2"); + + // Ensure that we have OSR code to jump to. + if (isSecondInvocation) { + ensureHasOsrCode("$noinline$inlineCache2"); + } + + // This call will be optimized in the OSR compiled code + // to check and deoptimize if m is not of type 'Main'. + Main other = m.inlineCache2(); + + // Jump to OSR compiled code. The second run + // of this method will have 'm' as a SubMain, and the compiled + // code we are jumping to will have wrongly optimized 'other' as being null. + if (isSecondInvocation) { + while (!isInOsrCode("$noinline$inlineCache2")) {} + } + + // We used to wrongly optimize this code and assume 'other' was always null. + return (other == null) ? null : other.returnClass(); + } + + public static Class $noinline$inlineCache3(Main m, boolean isSecondInvocation) { + // If we are running in non-JIT mode, or were unlucky enough to get this method + // already JITted, just return the expected value. + if (!isInInterpreter("$noinline$inlineCache3")) { + return null; + } + + ensureHasProfilingInfo("$noinline$inlineCache3"); + + // Ensure that we have OSR code to jump to. + if (isSecondInvocation) { + ensureHasOsrCode("$noinline$inlineCache3"); + } + + // This call will be optimized in the OSR compiled code + // to check and deoptimize if m is not of type 'Main'. + Main other = m.inlineCache3(); + + // Jump to OSR compiled code.
The second run + // of this method will have 'm' as a SubMain, and the compiled + // code we are jumping to will have wrongly optimized 'other' as being null. + if (isSecondInvocation) { + while (!isInOsrCode("$noinline$inlineCache3")) {} + } + + // We used to wrongly optimize this code and assume 'other' was always null. + return (other == null) ? null : other.returnClass(); + } + public Main inlineCache() { return new Main(); } + public Main inlineCache2() { + return null; + } + + public Main inlineCache3() { + return null; + } + public Class returnClass() { return Main.class; } @@ -235,6 +313,10 @@ class SubMain extends Main { return new SubMain(); } + public Main inlineCache2() { + return new SubMain(); + } + public void otherInlineCache() { return; } diff --git a/test/594-checker-irreducible-linorder/smali/IrreducibleLoop.smali b/test/594-checker-irreducible-linorder/smali/IrreducibleLoop.smali index 366c7b9b68..ef53ee867a 100644 --- a/test/594-checker-irreducible-linorder/smali/IrreducibleLoop.smali +++ b/test/594-checker-irreducible-linorder/smali/IrreducibleLoop.smali @@ -67,3 +67,57 @@ return p3 .end method + +## CHECK-START: int IrreducibleLoop.liveness2(boolean, boolean, boolean, int) builder (after) +## CHECK-DAG: Mul loop:<<Loop:B\d+>> +## CHECK-DAG: Not loop:<<Loop>> + +## CHECK-START: int IrreducibleLoop.liveness2(boolean, boolean, boolean, int) liveness (after) +## CHECK-DAG: Mul liveness:<<LPreEntry2:\d+>> +## CHECK-DAG: Not liveness:<<LBackEdge1:\d+>> +## CHECK-EVAL: <<LBackEdge1>> < <<LPreEntry2>> + +.method public liveness2(ZZZI)I + .registers 10 + + const v1, 1 + + :header1 + if-eqz p0, :body1 + + :exit + return p3 + + :body1 + # The test will generate an incorrect linear order when the following IF swaps + # its successors. To do that, load a boolean value and compare NotEqual to 1. + sget-boolean v2, LIrreducibleLoop;->f:Z + const v3, 1 + if-ne v2, v3, :pre_header2 + + :pre_entry2 + # This constant has a use in a phi in :back_edge2 and a back edge use in + # :back_edge1. Because the linear order is wrong, the back edge use has + # a lower liveness than the phi use. + const v0, 42 + mul-int/2addr p3, p3 + goto :back_edge2 + + :back_edge2 + add-int/2addr p3, v0 + add-int/2addr v0, v1 + goto :header2 + + :header2 + if-eqz p2, :back_edge2 + + :back_edge1 + not-int p3, p3 + goto :header1 + + :pre_header2 + const v0, 42 + goto :header2 +.end method + +.field public static f:Z diff --git a/test/595-error-class/expected.txt b/test/595-error-class/expected.txt new file mode 100644 index 0000000000..b0aad4deb5 --- /dev/null +++ b/test/595-error-class/expected.txt @@ -0,0 +1 @@ +passed diff --git a/test/595-error-class/info.txt b/test/595-error-class/info.txt new file mode 100644 index 0000000000..a58b8b31b4 --- /dev/null +++ b/test/595-error-class/info.txt @@ -0,0 +1 @@ +Regression test on merging array type with error component type. diff --git a/test/595-error-class/smali/error.smali b/test/595-error-class/smali/error.smali new file mode 100644 index 0000000000..925c34b293 --- /dev/null +++ b/test/595-error-class/smali/error.smali @@ -0,0 +1,23 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public final LAnError; + +.super LSuperOfAnError; + +# Override a final method to put this class in the error state. +.method public foo()V + .registers 1 + return-void +.end method diff --git a/test/595-error-class/smali/merge.smali b/test/595-error-class/smali/merge.smali new file mode 100644 index 0000000000..2f8b41504e --- /dev/null +++ b/test/595-error-class/smali/merge.smali @@ -0,0 +1,31 @@ +# +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LMerge; +.super Ljava/lang/Object; + +# Method that selects between x = new Integer[] or new AnError[], +# Reference type propagation should correctly see error in component type. +.method public static select(Z)Ljava/lang/Object; + .registers 2 + const/16 v0, 10 + if-eqz v1, :Skip + new-array v0, v0, [LAnError; + goto :Done +:Skip + new-array v0, v0, [Ljava/lang/Integer; +:Done + return-object v0 +.end method diff --git a/test/595-error-class/smali/super.smali b/test/595-error-class/smali/super.smali new file mode 100644 index 0000000000..da7467d164 --- /dev/null +++ b/test/595-error-class/smali/super.smali @@ -0,0 +1,22 @@ +# Copyright (C) 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.class public LSuperOfAnError; + +.super Ljava/lang/Object; + +.method public final foo()V + .registers 1 + return-void +.end method diff --git a/test/595-error-class/src/Main.java b/test/595-error-class/src/Main.java new file mode 100644 index 0000000000..655fa4336a --- /dev/null +++ b/test/595-error-class/src/Main.java @@ -0,0 +1,30 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.lang.reflect.*; + +public class Main { + + public static void main(String args[]) throws Throwable { + Class<?> c = Class.forName("Merge"); + Method m = c.getMethod("select", boolean.class); + Object x = m.invoke(null, true); + if (x == null) { + throw new Error("Did not get array"); + } + System.out.println("passed"); + } +} diff --git a/test/595-profile-saving/expected.txt b/test/595-profile-saving/expected.txt new file mode 100644 index 0000000000..6a5618ebc6 --- /dev/null +++ b/test/595-profile-saving/expected.txt @@ -0,0 +1 @@ +JNI_OnLoad called diff --git a/test/595-profile-saving/info.txt b/test/595-profile-saving/info.txt new file mode 100644 index 0000000000..5d318f5b15 --- /dev/null +++ b/test/595-profile-saving/info.txt @@ -0,0 +1 @@ +Check that profile recording works even when JIT compilation is not enabled. diff --git a/test/595-profile-saving/profile-saving.cc b/test/595-profile-saving/profile-saving.cc new file mode 100644 index 0000000000..a7e6f8ba8c --- /dev/null +++ b/test/595-profile-saving/profile-saving.cc @@ -0,0 +1,83 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "dex_file.h" + +#include "jit/offline_profiling_info.h" +#include "jit/profile_saver.h" +#include "jni.h" +#include "method_reference.h" +#include "mirror/class-inl.h" +#include "oat_file_assistant.h" +#include "oat_file_manager.h" +#include "scoped_thread_state_change.h" +#include "ScopedUtfChars.h" +#include "thread.h" + +namespace art { +namespace { + +class CreateProfilingInfoVisitor : public StackVisitor { + public: + explicit CreateProfilingInfoVisitor(Thread* thread, const char* method_name) + SHARED_REQUIRES(Locks::mutator_lock_) + : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), + method_name_(method_name) {} + + bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) { + ArtMethod* m = GetMethod(); + std::string m_name(m->GetName()); + + if (m_name.compare(method_name_) == 0) { + ProfilingInfo::Create(Thread::Current(), m, /* retry_allocation */ true); + method_index_ = m->GetDexMethodIndex(); + return false; + } + return true; + } + + int method_index_ = -1; + const char* const method_name_; +}; + +extern "C" JNIEXPORT jint JNICALL Java_Main_ensureProfilingInfo(JNIEnv* env, + jclass, + jstring method_name) { + ScopedUtfChars chars(env, method_name); + CHECK(chars.c_str() != nullptr); + ScopedObjectAccess soa(Thread::Current()); + CreateProfilingInfoVisitor visitor(soa.Self(), chars.c_str()); + visitor.WalkStack(); + return visitor.method_index_; +} + +extern "C" JNIEXPORT void JNICALL Java_Main_ensureProfileProcessing(JNIEnv*, jclass) { + ProfileSaver::ForceProcessProfiles(); +} + +extern "C" JNIEXPORT jboolean JNICALL Java_Main_presentInProfile( + JNIEnv* env, jclass cls, jstring filename, jint method_index) { + ScopedUtfChars filename_chars(env, filename); + CHECK(filename_chars.c_str() != nullptr); + ScopedObjectAccess soa(Thread::Current()); + const DexFile* dex_file = soa.Decode<mirror::Class*>(cls)->GetDexCache()->GetDexFile(); + return ProfileSaver::HasSeenMethod(std::string(filename_chars.c_str()), + dex_file, + static_cast<uint16_t>(method_index)); +} + +} // namespace +} // namespace art diff --git a/test/595-profile-saving/run b/test/595-profile-saving/run new file mode 100644 index 0000000000..f12fac9207 --- /dev/null +++ b/test/595-profile-saving/run @@ -0,0 +1,28 @@ +#!/bin/bash +# +# Copyright 2016 The Android Open Source Project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Use +# --compiler-filter=interpret-only to make sure that the test is not compiled AOT +# -XOatFileManagerCompilerFilter:interpret-only to make sure the test is not compiled +# when loaded (by PathClassLoader) +# -Xjitsaveprofilinginfo to enable profile saving +# -Xusejit:false to disable jit and only test profiles. 
+exec ${RUN} \ + -Xcompiler-option --compiler-filter=interpret-only \ + --runtime-option -XOatFileManagerCompilerFilter:interpret-only \ + --runtime-option -Xjitsaveprofilinginfo \ + --runtime-option -Xusejit:false \ + "${@}" diff --git a/test/595-profile-saving/src/Main.java b/test/595-profile-saving/src/Main.java new file mode 100644 index 0000000000..039503f7a4 --- /dev/null +++ b/test/595-profile-saving/src/Main.java @@ -0,0 +1,99 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.io.File; +import java.io.IOException; +import java.lang.reflect.Method; + +public class Main { + + public static void main(String[] args) throws Exception { + System.loadLibrary(args[0]); + + File file = null; + try { + file = createTempFile(); + // String codePath = getDexBaseLocation(); + String codePath = System.getenv("DEX_LOCATION") + "/595-profile-saving.jar"; + VMRuntime.registerAppInfo(file.getPath(), + System.getenv("DEX_LOCATION"), + new String[] {codePath}, + /* foreignProfileDir */ null); + + int methodIdx = $opt$noinline$testProfile(); + ensureProfileProcessing(); + if (!presentInProfile(file.getPath(), methodIdx)) { + throw new RuntimeException("Method with index " + methodIdx + " not in the profile"); + } + } finally { + if (file != null) { + file.delete(); + } + } + } + + public static int $opt$noinline$testProfile() { + if (doThrow) throw new Error(); + // Make sure we have profiling info for this method without the need to loop. + return ensureProfilingInfo("$opt$noinline$testProfile"); + } + + // Returns the dex method index. + public static native int ensureProfilingInfo(String methodName); + // Ensures the profile saver does its usual processing. + public static native void ensureProfileProcessing(); + // Checks if the profile saver knows about the method. + public static native boolean presentInProfile(String profile, int methodIdx); + + public static boolean doThrow = false; + private static final String TEMP_FILE_NAME_PREFIX = "dummy"; + private static final String TEMP_FILE_NAME_SUFFIX = "-file"; + + static native String getProfileInfoDump( + String filename); + + private static File createTempFile() throws Exception { + try { + return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX); + } catch (IOException e) { + System.setProperty("java.io.tmpdir", "/data/local/tmp"); + try { + return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX); + } catch (IOException e2) { + System.setProperty("java.io.tmpdir", "/sdcard"); + return File.createTempFile(TEMP_FILE_NAME_PREFIX, TEMP_FILE_NAME_SUFFIX); + } + } + } + + private static class VMRuntime { + private static final Method registerAppInfoMethod; + static { + try { + Class<? 
extends Object> c = Class.forName("dalvik.system.VMRuntime"); + registerAppInfoMethod = c.getDeclaredMethod("registerAppInfo", + String.class, String.class, String[].class, String.class); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + public static void registerAppInfo(String profile, String appDir, + String[] codePaths, String foreignDir) throws Exception { + registerAppInfoMethod.invoke(null, profile, appDir, codePaths, foreignDir); + } + } +} diff --git a/test/596-app-images/app_images.cc b/test/596-app-images/app_images.cc new file mode 100644 index 0000000000..11c0f424ba --- /dev/null +++ b/test/596-app-images/app_images.cc @@ -0,0 +1,69 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include <assert.h> +#include <iostream> +#include <pthread.h> +#include <stdio.h> +#include <vector> + +#include "gc/heap.h" +#include "gc/space/image_space.h" +#include "gc/space/space-inl.h" +#include "image.h" +#include "jni.h" +#include "mirror/class.h" +#include "runtime.h" +#include "scoped_thread_state_change.h" + +namespace art { + +namespace { + +extern "C" JNIEXPORT jboolean JNICALL Java_Main_checkAppImageLoaded(JNIEnv*, jclass) { + ScopedObjectAccess soa(Thread::Current()); + for (auto* space : Runtime::Current()->GetHeap()->GetContinuousSpaces()) { + if (space->IsImageSpace()) { + auto* image_space = space->AsImageSpace(); + const auto& image_header = image_space->GetImageHeader(); + if (image_header.IsAppImage()) { + return JNI_TRUE; + } + } + } + return JNI_FALSE; +} + +extern "C" JNIEXPORT jboolean JNICALL Java_Main_checkAppImageContains(JNIEnv*, jclass, jclass c) { + ScopedObjectAccess soa(Thread::Current()); + mirror::Class* klass_ptr = soa.Decode<mirror::Class*>(c); + for (auto* space : Runtime::Current()->GetHeap()->GetContinuousSpaces()) { + if (space->IsImageSpace()) { + auto* image_space = space->AsImageSpace(); + const auto& image_header = image_space->GetImageHeader(); + if (image_header.IsAppImage()) { + if (image_space->HasAddress(klass_ptr)) { + return JNI_TRUE; + } + } + } + } + return JNI_FALSE; +} + +} // namespace + +} // namespace art diff --git a/test/596-app-images/expected.txt b/test/596-app-images/expected.txt new file mode 100644 index 0000000000..6a5618ebc6 --- /dev/null +++ b/test/596-app-images/expected.txt @@ -0,0 +1 @@ +JNI_OnLoad called diff --git a/test/596-app-images/info.txt b/test/596-app-images/info.txt new file mode 100644 index 0000000000..a3d5e7ea70 --- /dev/null +++ b/test/596-app-images/info.txt @@ -0,0 +1 @@ +Tests that app-images are loaded and used. diff --git a/test/596-app-images/src/Main.java b/test/596-app-images/src/Main.java new file mode 100644 index 0000000000..75b31b8061 --- /dev/null +++ b/test/596-app-images/src/Main.java @@ -0,0 +1,33 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +class Main { + static class Inner { + public static int abc = 0; + } + + public static void main(String[] args) { + System.loadLibrary(args[0]); + if (!checkAppImageLoaded()) { + System.out.println("App image is not loaded!"); + } else if (!checkAppImageContains(Inner.class)) { + System.out.println("App image does not contain Inner!"); + } + } + + public static native boolean checkAppImageLoaded(); + public static native boolean checkAppImageContains(Class<?> klass); +} diff --git a/test/803-no-super/expected.txt b/test/803-no-super/expected.txt new file mode 100644 index 0000000000..5036991397 --- /dev/null +++ b/test/803-no-super/expected.txt @@ -0,0 +1,2 @@ +java.lang.ClassNotFoundException: NoSuper1 +Done! diff --git a/test/803-no-super/info.txt b/test/803-no-super/info.txt new file mode 100644 index 0000000000..0178a446e1 --- /dev/null +++ b/test/803-no-super/info.txt @@ -0,0 +1,3 @@ +Regression test that temp (erroneous) classes don't get conflict tables created. + +Obviously needs to run under Dalvik or ART. diff --git a/test/803-no-super/smali/nosuper1.smali b/test/803-no-super/smali/nosuper1.smali new file mode 100644 index 0000000000..df2eaa5ca8 --- /dev/null +++ b/test/803-no-super/smali/nosuper1.smali @@ -0,0 +1,3 @@ +.class public LNoSuper1; + +.super LNoClass; diff --git a/test/803-no-super/src/Main.java b/test/803-no-super/src/Main.java new file mode 100644 index 0000000000..a07e042c32 --- /dev/null +++ b/test/803-no-super/src/Main.java @@ -0,0 +1,29 @@ +/* + * Copyright (C) 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Attempt to load class with no superclass. + */ +public class Main { + public static void main(String[] args) throws Exception { + try { + Class<?> c = Class.forName("NoSuper1"); + } catch (Exception e) { + System.out.println(e); + } + System.out.println("Done!"); + } +} diff --git a/test/960-default-smali/expected.txt b/test/960-default-smali/expected.txt index 7671eed5de..f3db93f87f 100644 --- a/test/960-default-smali/expected.txt +++ b/test/960-default-smali/expected.txt @@ -82,3 +82,19 @@ J-virtual A.SayHiTwice()='Hi Hi ' J-interface Greeter.SayHiTwice()='Hi Hi ' J-virtual J.SayHiTwice()='Hi Hi ' End testing for type J +Testing for type K +K-interface Foo.bar()='foobar' +K-virtual K.bar()='foobar' +End testing for type K +Testing for type L +L-interface Foo.bar()='foobar' +L-virtual K.bar()='foobar' +L-virtual L.bar()='foobar' +End testing for type L +Testing for type M +M-interface Foo.bar()='BAZ!' +M-interface Fooer.bar()='BAZ!' +M-virtual K.bar()='BAZ!' 
+M-virtual L.bar()='BAZ!' +M-virtual M.bar()='BAZ!' +End testing for type M diff --git a/test/960-default-smali/src/Foo.java b/test/960-default-smali/src/Foo.java new file mode 100644 index 0000000000..ed5b35f47b --- /dev/null +++ b/test/960-default-smali/src/Foo.java @@ -0,0 +1,20 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +interface Foo { + public default String bar() { + return "foobar"; + } +} diff --git a/test/960-default-smali/src/Fooer.java b/test/960-default-smali/src/Fooer.java new file mode 100644 index 0000000000..d8a5f61636 --- /dev/null +++ b/test/960-default-smali/src/Fooer.java @@ -0,0 +1,19 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +interface Fooer extends Foo { + public String bar(); +} diff --git a/test/960-default-smali/src/K.java b/test/960-default-smali/src/K.java new file mode 100644 index 0000000000..4426be7192 --- /dev/null +++ b/test/960-default-smali/src/K.java @@ -0,0 +1,17 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +class K implements Foo { } diff --git a/test/960-default-smali/src/L.java b/test/960-default-smali/src/L.java new file mode 100644 index 0000000000..c08ab72a99 --- /dev/null +++ b/test/960-default-smali/src/L.java @@ -0,0 +1,17 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +class L extends K { } diff --git a/test/960-default-smali/src/M.java b/test/960-default-smali/src/M.java new file mode 100644 index 0000000000..affe7e9c9e --- /dev/null +++ b/test/960-default-smali/src/M.java @@ -0,0 +1,21 @@ +/* + * Copyright 2016 The Android Open Source Project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +class M extends L implements Fooer { + public String bar() { + return "BAZ!"; + } +} diff --git a/test/960-default-smali/src/classes.xml b/test/960-default-smali/src/classes.xml index 0aa41f7fb6..f3e50c570b 100644 --- a/test/960-default-smali/src/classes.xml +++ b/test/960-default-smali/src/classes.xml @@ -81,6 +81,27 @@ <implements> </implements> <methods> </methods> </class> + + <class name="K" super="java/lang/Object"> + <implements> + <item>Foo</item> + </implements> + <methods> </methods> + </class> + + <class name="L" super="K"> + <implements> </implements> + <methods> </methods> + </class> + + <class name="M" super="L"> + <implements> + <item>Fooer</item> + </implements> + <methods> + <method>bar</method> + </methods> + </class> </classes> <interfaces> @@ -123,5 +144,22 @@ <method type="abstract">GetPlace</method> </methods> </interface> + + <interface name="Foo" super="java/lang/Object"> + <implements> + </implements> + <methods> + <method type="default">bar</method> + </methods> + </interface> + + <interface name="Fooer" super="java/lang/Object"> + <implements> + <item>Foo</item> + </implements> + <methods> + <method type="abstract">bar</method> + </methods> + </interface> </interfaces> </data> diff --git a/test/Android.libarttest.mk b/test/Android.libarttest.mk index e547c72c0e..d6f5d372a9 100644 --- a/test/Android.libarttest.mk +++ b/test/Android.libarttest.mk @@ -41,7 +41,9 @@ LIBARTTEST_COMMON_SRC_FILES := \ 497-inlining-and-class-loader/clear_dex_cache.cc \ 543-env-long-ref/env_long_ref.cc \ 566-polymorphic-inlining/polymorphic_inline.cc \ - 570-checker-osr/osr.cc + 570-checker-osr/osr.cc \ + 595-profile-saving/profile-saving.cc \ + 596-app-images/app_images.cc ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttest.so ART_TARGET_LIBARTTEST_$(ART_PHONY_TEST_TARGET_SUFFIX) += $(ART_TARGET_TEST_OUT)/$(TARGET_ARCH)/libarttestd.so diff --git a/test/Android.run-test.mk b/test/Android.run-test.mk index f3cda479db..ee651b5494 100644 --- a/test/Android.run-test.mk +++ b/test/Android.run-test.mk @@ -563,6 +563,13 @@ endif TEST_ART_BROKEN_OPTIMIZING_READ_BARRIER_RUN_TESTS := TEST_ART_BROKEN_JIT_READ_BARRIER_RUN_TESTS := +TEST_ART_BROKEN_NPIC_RUN_TESTS := 596-app-images +ifneq (,$(filter npictest,$(PICTEST_TYPES))) + ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(RUN_TYPES),$(PREBUILD_TYPES), \ + ${COMPILER_TYPES},$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \ + 
$(IMAGE_TYPES),npictest,$(DEBUGGABLE_TYPES),$(TEST_ART_BROKEN_NPIC_RUN_TESTS),$(ALL_ADDRESS_SIZES)) +endif + # Tests that should fail in the heap poisoning configuration with the Optimizing compiler. # 055: Exceeds run time limits due to heap poisoning instrumentation (on ARM and ARM64 devices). TEST_ART_BROKEN_OPTIMIZING_HEAP_POISONING_RUN_TESTS := \ diff --git a/tools/buildbot-build.sh b/tools/buildbot-build.sh index 2eb52bcad9..304c2a9398 100755 --- a/tools/buildbot-build.sh +++ b/tools/buildbot-build.sh @@ -46,9 +46,14 @@ while true; do done if [[ $mode == "host" ]]; then - make_command="make $j_arg $showcommands build-art-host-tests $common_targets ${out_dir}/host/linux-x86/lib/libjavacoretests.so ${out_dir}/host/linux-x86/lib64/libjavacoretests.so" + make_command="make $j_arg $showcommands build-art-host-tests $common_targets" + make_command+=" ${out_dir}/host/linux-x86/lib/libjavacoretests.so " + make_command+=" ${out_dir}/host/linux-x86/lib64/libjavacoretests.so" elif [[ $mode == "target" ]]; then - make_command="make $j_arg $showcommands build-art-target-tests $common_targets libjavacrypto libjavacoretests linker toybox toolbox sh ${out_dir}/host/linux-x86/bin/adb libstdc++" + make_command="make $j_arg $showcommands build-art-target-tests $common_targets" + make_command+=" libjavacrypto libjavacoretests linker toybox toolbox sh" + make_command+=" ${out_dir}/host/linux-x86/bin/adb libstdc++ " + make_command+=" ${out_dir}/target/product/${TARGET_PRODUCT}/system/etc/public.libraries.txt" fi echo "Executing $make_command" diff --git a/tools/libcore_failures.txt b/tools/libcore_failures.txt index 38b6ea60f0..dd2cc3140f 100644 --- a/tools/libcore_failures.txt +++ b/tools/libcore_failures.txt @@ -243,48 +243,6 @@ "org.apache.harmony.tests.java.util.prefs.FilePreferencesImplTest#testPutGet"] }, { - description: "libnativehelper_compat_libc++ loading issue", - result: EXEC_FAILED, - modes: [device], - names: ["dalvik.system.JniTest#testGetSuperclass", - "dalvik.system.JniTest#testPassingBooleans", - "dalvik.system.JniTest#testPassingBytes", - "dalvik.system.JniTest#testPassingChars", - "dalvik.system.JniTest#testPassingClass", - "dalvik.system.JniTest#testPassingDoubles", - "dalvik.system.JniTest#testPassingFloats", - "dalvik.system.JniTest#testPassingInts", - "dalvik.system.JniTest#testPassingLongs", - "dalvik.system.JniTest#testPassingObjectReferences", - "dalvik.system.JniTest#testPassingShorts", - "dalvik.system.JniTest#testPassingThis", - "libcore.util.NativeAllocationRegistryTest#testBadSize", - "libcore.util.NativeAllocationRegistryTest#testEarlyFree", - "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndNoSharedRegistry", - "libcore.util.NativeAllocationRegistryTest#testNativeAllocationAllocatorAndSharedRegistry", - "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndNoSharedRegistry", - "libcore.util.NativeAllocationRegistryTest#testNativeAllocationNoAllocatorAndSharedRegistry", - "libcore.util.NativeAllocationRegistryTest#testNullArguments"] -}, -{ - description: "libnativehelper_compat_libc++.so not found by dlopen on ARM64", - result: EXEC_FAILED, - modes: [device], - bug: 28082914, - names: ["libcore.java.lang.ThreadTest#testContextClassLoaderIsInherited", - "libcore.java.lang.ThreadTest#testContextClassLoaderIsNotNull", - "libcore.java.lang.ThreadTest#testGetAllStackTracesIncludesAllGroups", - "libcore.java.lang.ThreadTest#testGetStackTrace", - "libcore.java.lang.ThreadTest#testJavaContextClassLoader", - 
"libcore.java.lang.ThreadTest#testLeakingStartedThreads", - "libcore.java.lang.ThreadTest#testLeakingUnstartedThreads", - "libcore.java.lang.ThreadTest#testNativeThreadNames", - "libcore.java.lang.ThreadTest#testThreadInterrupted", - "libcore.java.lang.ThreadTest#testThreadSleep", - "libcore.java.lang.ThreadTest#testThreadSleepIllegalArguments", - "libcore.java.lang.ThreadTest#testThreadWakeup"] -}, -{ description: "Only work with --mode=activity", result: EXEC_FAILED, names: [ "libcore.java.io.FileTest#testJavaIoTmpdirMutable" ] @@ -295,5 +253,11 @@ names: ["jsr166.CollectionTest#testEmptyMeansEmpty", "jsr166.Collection8Test#testForEach", "jsr166.Collection8Test#testForEachConcurrentStressTest"] +}, +{ + description: "Unclear why this started to fail", + result: EXEC_FAILED, + bug: 28574453, + names: [ "org.apache.harmony.tests.javax.security.cert.X509CertificateTest#testVerifyPublicKey" ] } ] diff --git a/tools/public.libraries.buildbot.txt b/tools/public.libraries.buildbot.txt new file mode 100644 index 0000000000..4b01796a0a --- /dev/null +++ b/tools/public.libraries.buildbot.txt @@ -0,0 +1,8 @@ +libart.so +libartd.so +libbacktrace.so +libc.so +libc++.so +libdl.so +libm.so +libnativehelper.so |