Diffstat (limited to 'compiler')
24 files changed, 303 insertions, 235 deletions
diff --git a/compiler/debug/elf_debug_frame_writer.h b/compiler/debug/elf_debug_frame_writer.h index d0c98a7b79..27b70c8caa 100644 --- a/compiler/debug/elf_debug_frame_writer.h +++ b/compiler/debug/elf_debug_frame_writer.h @@ -207,13 +207,12 @@ void WriteCFISection(linker::ElfBuilder<ElfTypes>* builder, } // Write .eh_frame/.debug_frame section. - auto* cfi_section = (format == dwarf::DW_DEBUG_FRAME_FORMAT - ? builder->GetDebugFrame() - : builder->GetEhFrame()); + const bool is_debug_frame = format == dwarf::DW_DEBUG_FRAME_FORMAT; + auto* cfi_section = (is_debug_frame ? builder->GetDebugFrame() : builder->GetEhFrame()); { cfi_section->Start(); const bool is64bit = Is64BitInstructionSet(builder->GetIsa()); - const Elf_Addr cfi_address = cfi_section->GetAddress(); + const Elf_Addr cfi_address = (is_debug_frame ? 0 : cfi_section->GetAddress()); const Elf_Addr cie_address = cfi_address; Elf_Addr buffer_address = cfi_address; std::vector<uint8_t> buffer; // Small temporary buffer. diff --git a/compiler/debug/elf_debug_info_writer.h b/compiler/debug/elf_debug_info_writer.h index d5999941d7..107ed488cd 100644 --- a/compiler/debug/elf_debug_info_writer.h +++ b/compiler/debug/elf_debug_info_writer.h @@ -22,6 +22,7 @@ #include <vector> #include "art_field-inl.h" +#include "code_item_accessors-inl.h" #include "debug/dwarf/debug_abbrev_writer.h" #include "debug/dwarf/debug_info_entry_writer.h" #include "debug/elf_compilation_unit.h" @@ -48,10 +49,10 @@ static void LocalInfoCallback(void* ctx, const DexFile::LocalInfo& entry) { static std::vector<const char*> GetParamNames(const MethodDebugInfo* mi) { std::vector<const char*> names; - if (mi->code_item != nullptr) { + CodeItemDebugInfoAccessor accessor(mi->dex_file, mi->code_item); + if (accessor.HasCodeItem()) { DCHECK(mi->dex_file != nullptr); - uint32_t debug_info_offset = OatFile::GetDebugInfoOffset(*mi->dex_file, mi->code_item); - const uint8_t* stream = mi->dex_file->GetDebugInfoStream(debug_info_offset); + const uint8_t* stream = mi->dex_file->GetDebugInfoStream(accessor.DebugInfoOffset()); if (stream != nullptr) { DecodeUnsignedLeb128(&stream); // line. uint32_t parameters_size = DecodeUnsignedLeb128(&stream); @@ -162,7 +163,7 @@ class ElfCompilationUnitWriter { for (auto mi : compilation_unit.methods) { DCHECK(mi->dex_file != nullptr); const DexFile* dex = mi->dex_file; - const DexFile::CodeItem* dex_code = mi->code_item; + CodeItemDebugInfoAccessor accessor(dex, mi->code_item); const DexFile::MethodId& dex_method = dex->GetMethodId(mi->dex_method_index); const DexFile::ProtoId& dex_proto = dex->GetMethodPrototype(dex_method); const DexFile::TypeList* dex_params = dex->GetProtoParameters(dex_proto); @@ -204,13 +205,13 @@ class ElfCompilationUnitWriter { // Decode dex register locations for all stack maps. // It might be expensive, so do it just once and reuse the result. 
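Note on the GetParamNames() hunk above: the function now reads the dex debug_info_item header through CodeItemDebugInfoAccessor. A minimal sketch of that header walk, using only calls visible in this hunk (error handling and the uleb128p1 name indices that follow are elided):

    // debug_info_item starts with: uleb128 line_start; uleb128 parameters_size;
    // followed by parameters_size uleb128p1 string indices (the parameter names).
    const uint8_t* stream = mi->dex_file->GetDebugInfoStream(accessor.DebugInfoOffset());
    if (stream != nullptr) {
      DecodeUnsignedLeb128(&stream);                             // line_start (unused here).
      uint32_t parameters_size = DecodeUnsignedLeb128(&stream);  // number of named parameters.
      // Each subsequent uleb128p1 value is a string index for one parameter name.
    }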
std::vector<DexRegisterMap> dex_reg_maps; - if (dex_code != nullptr && mi->code_info != nullptr) { + if (accessor.HasCodeItem() && mi->code_info != nullptr) { const CodeInfo code_info(mi->code_info); CodeInfoEncoding encoding = code_info.ExtractEncoding(); for (size_t s = 0; s < code_info.GetNumberOfStackMaps(encoding); ++s) { const StackMap& stack_map = code_info.GetStackMapAt(s, encoding); dex_reg_maps.push_back(code_info.GetDexRegisterMapOf( - stack_map, encoding, dex_code->registers_size_)); + stack_map, encoding, accessor.RegistersSize())); } } @@ -224,9 +225,9 @@ class ElfCompilationUnitWriter { WriteName("this"); info_.WriteFlagPresent(DW_AT_artificial); WriteLazyType(dex_class_desc); - if (dex_code != nullptr) { + if (accessor.HasCodeItem()) { // Write the stack location of the parameter. - const uint32_t vreg = dex_code->registers_size_ - dex_code->ins_size_ + arg_reg; + const uint32_t vreg = accessor.RegistersSize() - accessor.InsSize() + arg_reg; const bool is64bitValue = false; WriteRegLocation(mi, dex_reg_maps, vreg, is64bitValue, compilation_unit.code_address); } @@ -244,30 +245,31 @@ class ElfCompilationUnitWriter { const char* type_desc = dex->StringByTypeIdx(dex_params->GetTypeItem(i).type_idx_); WriteLazyType(type_desc); const bool is64bitValue = type_desc[0] == 'D' || type_desc[0] == 'J'; - if (dex_code != nullptr) { + if (accessor.HasCodeItem()) { // Write the stack location of the parameter. - const uint32_t vreg = dex_code->registers_size_ - dex_code->ins_size_ + arg_reg; + const uint32_t vreg = accessor.RegistersSize() - accessor.InsSize() + arg_reg; WriteRegLocation(mi, dex_reg_maps, vreg, is64bitValue, compilation_unit.code_address); } arg_reg += is64bitValue ? 2 : 1; info_.EndTag(); } - if (dex_code != nullptr) { - DCHECK_EQ(arg_reg, dex_code->ins_size_); + if (accessor.HasCodeItem()) { + DCHECK_EQ(arg_reg, accessor.InsSize()); } } // Write local variables. LocalInfos local_infos; - uint32_t debug_info_offset = OatFile::GetDebugInfoOffset(*dex, dex_code); - if (dex->DecodeDebugLocalInfo(dex_code, - debug_info_offset, + if (dex->DecodeDebugLocalInfo(accessor.RegistersSize(), + accessor.InsSize(), + accessor.InsnsSizeInCodeUnits(), + accessor.DebugInfoOffset(), is_static, mi->dex_method_index, LocalInfoCallback, &local_infos)) { for (const DexFile::LocalInfo& var : local_infos) { - if (var.reg_ < dex_code->registers_size_ - dex_code->ins_size_) { + if (var.reg_ < accessor.RegistersSize() - accessor.InsSize()) { info_.StartTag(DW_TAG_variable); WriteName(var.name_); WriteLazyType(var.descriptor_); @@ -296,7 +298,7 @@ class ElfCompilationUnitWriter { CHECK_EQ(info_.Depth(), 0); std::vector<uint8_t> buffer; buffer.reserve(info_.data()->size() + KB); - const size_t offset = owner_->builder_->GetDebugInfo()->GetSize(); + const size_t offset = owner_->builder_->GetDebugInfo()->GetPosition(); // All compilation units share single table which is at the start of .debug_abbrev. const size_t debug_abbrev_offset = 0; WriteDebugInfoCU(debug_abbrev_offset, info_, offset, &buffer, &owner_->debug_info_patches_); @@ -461,7 +463,7 @@ class ElfCompilationUnitWriter { CHECK_EQ(info_.Depth(), 0); std::vector<uint8_t> buffer; buffer.reserve(info_.data()->size() + KB); - const size_t offset = owner_->builder_->GetDebugInfo()->GetSize(); + const size_t offset = owner_->builder_->GetDebugInfo()->GetPosition(); // All compilation units share single table which is at the start of .debug_abbrev. 
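The vreg arithmetic repeated in the hunks above encodes the dex frame layout: a method frame has RegistersSize() virtual registers and its incoming arguments occupy the last InsSize() of them. A hedged helper form of that computation (the function name is illustrative, not part of this change):

    // Virtual register holding argument 'arg_reg' (0 is 'this' for instance methods).
    uint32_t ArgumentVReg(const CodeItemDebugInfoAccessor& accessor, uint32_t arg_reg) {
      DCHECK_LT(arg_reg, accessor.InsSize());
      return accessor.RegistersSize() - accessor.InsSize() + arg_reg;
    }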
const size_t debug_abbrev_offset = 0; WriteDebugInfoCU(debug_abbrev_offset, info_, offset, &buffer, &owner_->debug_info_patches_); diff --git a/compiler/debug/elf_debug_line_writer.h b/compiler/debug/elf_debug_line_writer.h index 943e03a765..d7fd52448c 100644 --- a/compiler/debug/elf_debug_line_writer.h +++ b/compiler/debug/elf_debug_line_writer.h @@ -60,7 +60,7 @@ class ElfDebugLineWriter { ? builder_->GetText()->GetAddress() : 0; - compilation_unit.debug_line_offset = builder_->GetDebugLine()->GetSize(); + compilation_unit.debug_line_offset = builder_->GetDebugLine()->GetPosition(); std::vector<dwarf::FileEntry> files; std::unordered_map<std::string, size_t> files_map; @@ -159,9 +159,9 @@ class ElfDebugLineWriter { PositionInfos dex2line_map; DCHECK(mi->dex_file != nullptr); const DexFile* dex = mi->dex_file; - uint32_t debug_info_offset = OatFile::GetDebugInfoOffset(*dex, mi->code_item); - if (!dex->DecodeDebugPositionInfo( - mi->code_item, debug_info_offset, PositionInfoCallback, &dex2line_map)) { + CodeItemDebugInfoAccessor accessor(dex, mi->code_item); + const uint32_t debug_info_offset = accessor.DebugInfoOffset(); + if (!dex->DecodeDebugPositionInfo(debug_info_offset, PositionInfoCallback, &dex2line_map)) { continue; } @@ -268,7 +268,7 @@ class ElfDebugLineWriter { } std::vector<uint8_t> buffer; buffer.reserve(opcodes.data()->size() + KB); - size_t offset = builder_->GetDebugLine()->GetSize(); + size_t offset = builder_->GetDebugLine()->GetPosition(); WriteDebugLineTable(directories, files, opcodes, offset, &buffer, &debug_line_patches_); builder_->GetDebugLine()->WriteFully(buffer.data(), buffer.size()); return buffer.size(); diff --git a/compiler/debug/elf_debug_loc_writer.h b/compiler/debug/elf_debug_loc_writer.h index bb856b29f4..1d609af4e6 100644 --- a/compiler/debug/elf_debug_loc_writer.h +++ b/compiler/debug/elf_debug_loc_writer.h @@ -251,7 +251,10 @@ static void WriteDebugLocEntry(const MethodDebugInfo* method_info, // kInStackLargeOffset and kConstantLargeValue are hidden by GetKind(). // kInRegisterHigh and kInFpuRegisterHigh should be handled by // the special cases above and they should not occur alone. - LOG(ERROR) << "Unexpected register location kind: " << kind; + LOG(WARNING) << "Unexpected register location: " << kind + << " (This can indicate either a bug in the dexer when generating" + << " local variable information, or a bug in ART compiler." 
+ << " Please file a bug at go/art-bug)"; break; } if (is64bitValue) { diff --git a/compiler/debug/elf_debug_writer.cc b/compiler/debug/elf_debug_writer.cc index 33c46d7e1f..a6267292bf 100644 --- a/compiler/debug/elf_debug_writer.cc +++ b/compiler/debug/elf_debug_writer.cc @@ -108,29 +108,32 @@ void WriteDebugInfo(linker::ElfBuilder<ElfTypes>* builder, std::vector<uint8_t> MakeMiniDebugInfo( InstructionSet isa, const InstructionSetFeatures* features, - size_t rodata_size, + uint64_t text_address, size_t text_size, const ArrayRef<const MethodDebugInfo>& method_infos) { if (Is64BitInstructionSet(isa)) { return MakeMiniDebugInfoInternal<ElfTypes64>(isa, features, - rodata_size, + text_address, text_size, method_infos); } else { return MakeMiniDebugInfoInternal<ElfTypes32>(isa, features, - rodata_size, + text_address, text_size, method_infos); } } template <typename ElfTypes> -static std::vector<uint8_t> WriteDebugElfFileForMethodsInternal( +static std::vector<uint8_t> MakeElfFileForJITInternal( InstructionSet isa, const InstructionSetFeatures* features, - const ArrayRef<const MethodDebugInfo>& method_infos) { + bool mini_debug_info, + const MethodDebugInfo& mi) { + CHECK_EQ(mi.is_code_address_text_relative, false); + ArrayRef<const MethodDebugInfo> method_infos(&mi, 1); std::vector<uint8_t> buffer; buffer.reserve(KB); linker::VectorOutputStream out("Debug ELF file", &buffer); @@ -138,23 +141,34 @@ static std::vector<uint8_t> WriteDebugElfFileForMethodsInternal( new linker::ElfBuilder<ElfTypes>(isa, features, &out)); // No program headers since the ELF file is not linked and has no allocated sections. builder->Start(false /* write_program_headers */); - WriteDebugInfo(builder.get(), - method_infos, - dwarf::DW_DEBUG_FRAME_FORMAT, - false /* write_oat_patches */); + if (mini_debug_info) { + std::vector<uint8_t> mdi = MakeMiniDebugInfo(isa, + features, + mi.code_address, + mi.code_size, + method_infos); + builder->WriteSection(".gnu_debugdata", &mdi); + } else { + builder->GetText()->AllocateVirtualMemory(mi.code_address, mi.code_size); + WriteDebugInfo(builder.get(), + method_infos, + dwarf::DW_DEBUG_FRAME_FORMAT, + false /* write_oat_patches */); + } builder->End(); CHECK(builder->Good()); return buffer; } -std::vector<uint8_t> WriteDebugElfFileForMethods( +std::vector<uint8_t> MakeElfFileForJIT( InstructionSet isa, const InstructionSetFeatures* features, - const ArrayRef<const MethodDebugInfo>& method_infos) { + bool mini_debug_info, + const MethodDebugInfo& method_info) { if (Is64BitInstructionSet(isa)) { - return WriteDebugElfFileForMethodsInternal<ElfTypes64>(isa, features, method_infos); + return MakeElfFileForJITInternal<ElfTypes64>(isa, features, mini_debug_info, method_info); } else { - return WriteDebugElfFileForMethodsInternal<ElfTypes32>(isa, features, method_infos); + return MakeElfFileForJITInternal<ElfTypes32>(isa, features, mini_debug_info, method_info); } } diff --git a/compiler/debug/elf_debug_writer.h b/compiler/debug/elf_debug_writer.h index d24ca9b203..a47bf076b9 100644 --- a/compiler/debug/elf_debug_writer.h +++ b/compiler/debug/elf_debug_writer.h @@ -43,14 +43,15 @@ void WriteDebugInfo( std::vector<uint8_t> MakeMiniDebugInfo( InstructionSet isa, const InstructionSetFeatures* features, - size_t rodata_section_size, + uint64_t text_section_address, size_t text_section_size, const ArrayRef<const MethodDebugInfo>& method_infos); -std::vector<uint8_t> WriteDebugElfFileForMethods( +std::vector<uint8_t> MakeElfFileForJIT( InstructionSet isa, const InstructionSetFeatures* 
features, - const ArrayRef<const MethodDebugInfo>& method_infos); + bool mini_debug_info, + const MethodDebugInfo& method_info); std::vector<uint8_t> WriteDebugElfFileForClasses( InstructionSet isa, diff --git a/compiler/debug/elf_gnu_debugdata_writer.h b/compiler/debug/elf_gnu_debugdata_writer.h index 1cdf6b0ad1..78b8e2780c 100644 --- a/compiler/debug/elf_gnu_debugdata_writer.h +++ b/compiler/debug/elf_gnu_debugdata_writer.h @@ -80,7 +80,7 @@ template <typename ElfTypes> static std::vector<uint8_t> MakeMiniDebugInfoInternal( InstructionSet isa, const InstructionSetFeatures* features, - size_t rodata_section_size, + typename ElfTypes::Addr text_section_address, size_t text_section_size, const ArrayRef<const MethodDebugInfo>& method_infos) { std::vector<uint8_t> buffer; @@ -88,11 +88,9 @@ static std::vector<uint8_t> MakeMiniDebugInfoInternal( linker::VectorOutputStream out("Mini-debug-info ELF file", &buffer); std::unique_ptr<linker::ElfBuilder<ElfTypes>> builder( new linker::ElfBuilder<ElfTypes>(isa, features, &out)); - builder->Start(); - // Mirror .rodata and .text as NOBITS sections. - // It is needed to detected relocations after compression. - builder->GetRoData()->WriteNoBitsSection(rodata_section_size); - builder->GetText()->WriteNoBitsSection(text_section_size); + builder->Start(false /* write_program_headers */); + // Mirror .text as NOBITS section since the added symbols will reference it. + builder->GetText()->AllocateVirtualMemory(text_section_address, text_section_size); WriteDebugSymbols(builder.get(), method_infos, false /* with_signature */); WriteCFISection(builder.get(), method_infos, diff --git a/compiler/debug/elf_symtab_writer.h b/compiler/debug/elf_symtab_writer.h index 0907e102a0..57e010f232 100644 --- a/compiler/debug/elf_symtab_writer.h +++ b/compiler/debug/elf_symtab_writer.h @@ -79,8 +79,9 @@ static void WriteDebugSymbols(linker::ElfBuilder<ElfTypes>* builder, last_name_offset = name_offset; } - const auto* text = info.is_code_address_text_relative ? builder->GetText() : nullptr; - uint64_t address = info.code_address + (text != nullptr ? text->GetAddress() : 0); + const auto* text = builder->GetText(); + uint64_t address = info.code_address; + address += info.is_code_address_text_relative ? text->GetAddress() : 0; // Add in code delta, e.g., thumb bit 0 for Thumb2 code. 
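The .symtab hunk above now always resolves symbols against .text; a condensed sketch of the resulting address computation (JIT methods carry absolute code addresses, AOT methods are .text-relative, and Thumb2 needs the low bit set):

    const auto* text = builder->GetText();
    uint64_t address = info.code_address;
    if (info.is_code_address_text_relative) {
      address += text->GetAddress();                 // AOT: make the address absolute.
    }
    address += CompiledMethod::CodeDelta(info.isa);  // e.g. Thumb bit 0 for Thumb2 code.
    symtab->Add(name_offset, text, address, info.code_size, STB_GLOBAL, STT_FUNC);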
address += CompiledMethod::CodeDelta(info.isa); symtab->Add(name_offset, text, address, info.code_size, STB_GLOBAL, STT_FUNC); diff --git a/compiler/dex/dex_to_dex_compiler.cc b/compiler/dex/dex_to_dex_compiler.cc index 476903522e..ead909af9a 100644 --- a/compiler/dex/dex_to_dex_compiler.cc +++ b/compiler/dex/dex_to_dex_compiler.cc @@ -296,7 +296,6 @@ void DexCompiler::CompileInvokeVirtual(Instruction* inst, uint32_t dex_pc, ClassLinker* class_linker = unit_.GetClassLinker(); ArtMethod* resolved_method = class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>( - GetDexFile(), method_idx, unit_.GetDexCache(), unit_.GetClassLoader(), diff --git a/compiler/driver/compiler_driver-inl.h b/compiler/driver/compiler_driver-inl.h index 42fc4aa5ba..294072d7e7 100644 --- a/compiler/driver/compiler_driver-inl.h +++ b/compiler/driver/compiler_driver-inl.h @@ -33,13 +33,15 @@ namespace art { inline ObjPtr<mirror::Class> CompilerDriver::ResolveClass( - const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, - Handle<mirror::ClassLoader> class_loader, dex::TypeIndex cls_index, + const ScopedObjectAccess& soa, + Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader, + dex::TypeIndex cls_index, const DexCompilationUnit* mUnit) { DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile()); DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get()); - ObjPtr<mirror::Class> cls = mUnit->GetClassLinker()->ResolveType( - *mUnit->GetDexFile(), cls_index, dex_cache, class_loader); + ObjPtr<mirror::Class> cls = + mUnit->GetClassLinker()->ResolveType(cls_index, dex_cache, class_loader); DCHECK_EQ(cls == nullptr, soa.Self()->IsExceptionPending()); if (UNLIKELY(cls == nullptr)) { // Clean up any exception left by type resolution. @@ -49,8 +51,10 @@ inline ObjPtr<mirror::Class> CompilerDriver::ResolveClass( } inline ObjPtr<mirror::Class> CompilerDriver::ResolveCompilingMethodsClass( - const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, - Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit) { + const ScopedObjectAccess& soa, + Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader, + const DexCompilationUnit* mUnit) { DCHECK_EQ(dex_cache->GetDexFile(), mUnit->GetDexFile()); DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get()); const DexFile::MethodId& referrer_method_id = @@ -105,7 +109,7 @@ inline ArtMethod* CompilerDriver::ResolveMethod( DCHECK_EQ(class_loader.Get(), mUnit->GetClassLoader().Get()); ArtMethod* resolved_method = mUnit->GetClassLinker()->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>( - *dex_cache->GetDexFile(), method_idx, dex_cache, class_loader, nullptr, invoke_type); + method_idx, dex_cache, class_loader, /* referrer */ nullptr, invoke_type); if (UNLIKELY(resolved_method == nullptr)) { DCHECK(soa.Self()->IsExceptionPending()); // Clean up any exception left by type resolution. 
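The ClassLinker call sites in this file (and in several files below) drop the explicit DexFile argument; the dex file is presumably derived from the DexCache handle instead. The new call shape, as used throughout this change:

    ObjPtr<mirror::Class> cls =
        class_linker->ResolveType(cls_index, dex_cache, class_loader);
    ArtMethod* method =
        class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>(
            method_idx, dex_cache, class_loader, /* referrer */ nullptr, invoke_type);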
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index fcc6b8be9e..0631c0f12c 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -1048,22 +1048,21 @@ void CompilerDriver::LoadImageClasses(TimingLogger* timings) { for (const auto& exception_type : unresolved_exception_types) { dex::TypeIndex exception_type_idx = exception_type.first; const DexFile* dex_file = exception_type.second; - StackHandleScope<2> hs2(self); + StackHandleScope<1> hs2(self); Handle<mirror::DexCache> dex_cache(hs2.NewHandle(class_linker->RegisterDexFile(*dex_file, nullptr))); - Handle<mirror::Class> klass(hs2.NewHandle( + ObjPtr<mirror::Class> klass = (dex_cache != nullptr) - ? class_linker->ResolveType(*dex_file, - exception_type_idx, + ? class_linker->ResolveType(exception_type_idx, dex_cache, ScopedNullHandle<mirror::ClassLoader>()) - : nullptr)); + : nullptr; if (klass == nullptr) { const DexFile::TypeId& type_id = dex_file->GetTypeId(exception_type_idx); const char* descriptor = dex_file->GetTypeDescriptor(type_id); LOG(FATAL) << "Failed to resolve class " << descriptor; } - DCHECK(java_lang_Throwable->IsAssignableFrom(klass.Get())); + DCHECK(java_lang_Throwable->IsAssignableFrom(klass)); } // Resolving exceptions may load classes that reference more exceptions, iterate until no // more are found @@ -1638,7 +1637,7 @@ class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor { soa.Self(), dex_file))); // Resolve the class. ObjPtr<mirror::Class> klass = - class_linker->ResolveType(dex_file, class_def.class_idx_, dex_cache, class_loader); + class_linker->ResolveType(class_def.class_idx_, dex_cache, class_loader); bool resolve_fields_and_methods; if (klass == nullptr) { // Class couldn't be resolved, for example, super-class is in a different dex file. Don't @@ -1690,7 +1689,10 @@ class ResolveClassFieldsAndMethodsVisitor : public CompilationVisitor { if (resolve_fields_and_methods) { while (it.HasNextMethod()) { ArtMethod* method = class_linker->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>( - dex_file, it.GetMemberIndex(), dex_cache, class_loader, nullptr, + it.GetMemberIndex(), + dex_cache, + class_loader, + /* referrer */ nullptr, it.GetMethodInvokeType(class_def)); if (method == nullptr) { CheckAndClearResolveException(soa.Self()); @@ -1726,7 +1728,7 @@ class ResolveTypeVisitor : public CompilationVisitor { dex_file, class_loader.Get()))); ObjPtr<mirror::Class> klass = (dex_cache != nullptr) - ? class_linker->ResolveType(dex_file, dex::TypeIndex(type_idx), dex_cache, class_loader) + ? class_linker->ResolveType(dex::TypeIndex(type_idx), dex_cache, class_loader) : nullptr; if (klass == nullptr) { diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index ab788e326f..e001726c95 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -219,15 +219,17 @@ class CompilerDriver { REQUIRES_SHARED(Locks::mutator_lock_); // Resolve compiling method's class. Returns null on failure. 
- ObjPtr<mirror::Class> ResolveCompilingMethodsClass( - const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, - Handle<mirror::ClassLoader> class_loader, const DexCompilationUnit* mUnit) + ObjPtr<mirror::Class> ResolveCompilingMethodsClass(const ScopedObjectAccess& soa, + Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader, + const DexCompilationUnit* mUnit) REQUIRES_SHARED(Locks::mutator_lock_); - ObjPtr<mirror::Class> ResolveClass( - const ScopedObjectAccess& soa, Handle<mirror::DexCache> dex_cache, - Handle<mirror::ClassLoader> class_loader, dex::TypeIndex type_index, - const DexCompilationUnit* mUnit) + ObjPtr<mirror::Class> ResolveClass(const ScopedObjectAccess& soa, + Handle<mirror::DexCache> dex_cache, + Handle<mirror::ClassLoader> class_loader, + dex::TypeIndex type_index, + const DexCompilationUnit* mUnit) REQUIRES_SHARED(Locks::mutator_lock_); // Resolve a field. Returns null on failure, including incompatible class change. @@ -240,10 +242,10 @@ class CompilerDriver { REQUIRES_SHARED(Locks::mutator_lock_); // Can we fast-path an IGET/IPUT access to an instance field? If yes, compute the field offset. - std::pair<bool, bool> IsFastInstanceField( - ObjPtr<mirror::DexCache> dex_cache, - ObjPtr<mirror::Class> referrer_class, - ArtField* resolved_field, uint16_t field_idx) + std::pair<bool, bool> IsFastInstanceField(ObjPtr<mirror::DexCache> dex_cache, + ObjPtr<mirror::Class> referrer_class, + ArtField* resolved_field, + uint16_t field_idx) REQUIRES_SHARED(Locks::mutator_lock_); // Resolve a method. Returns null on failure, including incompatible class change. diff --git a/compiler/linker/elf_builder.h b/compiler/linker/elf_builder.h index b30b55e9b4..aa3cd98595 100644 --- a/compiler/linker/elf_builder.h +++ b/compiler/linker/elf_builder.h @@ -108,8 +108,6 @@ class ElfBuilder FINAL { section_index_(0), name_(name), link_(link), - started_(false), - finished_(false), phdr_flags_(PF_R), phdr_type_(0) { DCHECK_GE(align, 1u); @@ -120,90 +118,62 @@ class ElfBuilder FINAL { header_.sh_entsize = entsize; } - // Start writing of this section. - void Start() { - CHECK(!started_); - CHECK(!finished_); - started_ = true; - auto& sections = owner_->sections_; - // Check that the previous section is complete. - CHECK(sections.empty() || sections.back()->finished_); - // The first ELF section index is 1. Index 0 is reserved for NULL. - section_index_ = sections.size() + 1; - // Page-align if we switch between allocated and non-allocated sections, - // or if we change the type of allocation (e.g. executable vs non-executable). - if (!sections.empty()) { - if (header_.sh_flags != sections.back()->header_.sh_flags) { - header_.sh_addralign = kPageSize; - } - } - // Align file position. - if (header_.sh_type != SHT_NOBITS) { - header_.sh_offset = owner_->AlignFileOffset(header_.sh_addralign); - } else { - header_.sh_offset = 0; - } - // Align virtual memory address. - if ((header_.sh_flags & SHF_ALLOC) != 0) { - header_.sh_addr = owner_->AlignVirtualAddress(header_.sh_addralign); - } else { - header_.sh_addr = 0; - } - // Push this section on the list of written sections. - sections.push_back(this); + // Allocate chunk of virtual memory for this section from the owning ElfBuilder. + // This must be done at the start for all SHF_ALLOC sections (i.e. mmaped by linker). + // It is fine to allocate section but never call Start/End() (e.g. the .bss section). 
+ void AllocateVirtualMemory(Elf_Word size) { + AllocateVirtualMemory(owner_->virtual_address_, size); } - // Finish writing of this section. - void End() { - CHECK(started_); - CHECK(!finished_); - finished_ = true; - if (header_.sh_type == SHT_NOBITS) { - CHECK_GT(header_.sh_size, 0u); - } else { - // Use the current file position to determine section size. - off_t file_offset = owner_->stream_.Seek(0, kSeekCurrent); - CHECK_GE(file_offset, (off_t)header_.sh_offset); - header_.sh_size = file_offset - header_.sh_offset; - } - if ((header_.sh_flags & SHF_ALLOC) != 0) { - owner_->virtual_address_ += header_.sh_size; - } + void AllocateVirtualMemory(Elf_Addr addr, Elf_Word size) { + CHECK_NE(header_.sh_flags & SHF_ALLOC, 0u); + Elf_Word align = AddSection(); + CHECK_EQ(header_.sh_addr, 0u); + header_.sh_addr = RoundUp(addr, align); + CHECK(header_.sh_size == 0u || header_.sh_size == size); + header_.sh_size = size; + CHECK_LE(owner_->virtual_address_, header_.sh_addr); + owner_->virtual_address_ = header_.sh_addr + header_.sh_size; } - // Get the location of this section in virtual memory. - Elf_Addr GetAddress() const { - CHECK(started_); - return header_.sh_addr; + // Start writing file data of this section. + void Start() { + CHECK(owner_->current_section_ == nullptr); + Elf_Word align = AddSection(); + CHECK_EQ(header_.sh_offset, 0u); + header_.sh_offset = owner_->AlignFileOffset(align); + owner_->current_section_ = this; } - // Returns the size of the content of this section. - Elf_Word GetSize() const { - if (finished_) { - return header_.sh_size; - } else { - CHECK(started_); - CHECK_NE(header_.sh_type, (Elf_Word)SHT_NOBITS); - return owner_->stream_.Seek(0, kSeekCurrent) - header_.sh_offset; - } + // Finish writing file data of this section. + void End() { + CHECK(owner_->current_section_ == this); + Elf_Word position = GetPosition(); + CHECK(header_.sh_size == 0u || header_.sh_size == position); + header_.sh_size = position; + owner_->current_section_ = nullptr; + } + + // Get the number of bytes written so far. + // Only valid while writing the section. + Elf_Word GetPosition() const { + CHECK(owner_->current_section_ == this); + off_t file_offset = owner_->stream_.Seek(0, kSeekCurrent); + DCHECK_GE(file_offset, (off_t)header_.sh_offset); + return file_offset - header_.sh_offset; } - // Write this section as "NOBITS" section. (used for the .bss section) - // This means that the ELF file does not contain the initial data for this section - // and it will be zero-initialized when the ELF file is loaded in the running program. - void WriteNoBitsSection(Elf_Word size) { + // Get the location of this section in virtual memory. + Elf_Addr GetAddress() const { DCHECK_NE(header_.sh_flags & SHF_ALLOC, 0u); - header_.sh_type = SHT_NOBITS; - Start(); - header_.sh_size = size; - End(); + DCHECK_NE(header_.sh_addr, 0u); + return header_.sh_addr; } // This function always succeeds to simplify code. // Use builder's Good() to check the actual status. bool WriteFully(const void* buffer, size_t byte_count) OVERRIDE { - CHECK(started_); - CHECK(!finished_); + CHECK(owner_->current_section_ == this); return owner_->stream_.WriteFully(buffer, byte_count); } @@ -221,19 +191,32 @@ class ElfBuilder FINAL { } Elf_Word GetSectionIndex() const { - DCHECK(started_); DCHECK_NE(section_index_, 0u); return section_index_; } private: + // Add this section to the list of generated ELF sections (if not there already). 
+ // It also ensures the alignment is sufficient to generate valid program headers, + // since that depends on the previous section. It returns the required alignment. + Elf_Word AddSection() { + if (section_index_ == 0) { + std::vector<Section*>& sections = owner_->sections_; + Elf_Word last = sections.empty() ? PF_R : sections.back()->phdr_flags_; + if (phdr_flags_ != last) { + header_.sh_addralign = kPageSize; // Page-align if R/W/X flags changed. + } + sections.push_back(this); + section_index_ = sections.size(); // First ELF section has index 1. + } + return owner_->write_program_headers_ ? header_.sh_addralign : 1; + } + ElfBuilder<ElfTypes>* owner_; Elf_Shdr header_; Elf_Word section_index_; const std::string name_; const Section* const link_; - bool started_; - bool finished_; Elf_Word phdr_flags_; Elf_Word phdr_type_; @@ -370,7 +353,7 @@ class ElfBuilder FINAL { Elf_Word section_index; if (section != nullptr) { DCHECK_LE(section->GetAddress(), addr); - DCHECK_LE(addr, section->GetAddress() + section->GetSize()); + DCHECK_LE(addr, section->GetAddress() + section->header_.sh_size); section_index = section->GetSectionIndex(); } else { section_index = static_cast<Elf_Word>(SHN_ABS); @@ -479,6 +462,10 @@ class ElfBuilder FINAL { digest_start_(-1) { } + Elf_Word GetSize() { + return 16 + kBuildIdLen; + } + void Write() { // The size fields are 32-bit on both 32-bit and 64-bit systems, confirmed // with the 64-bit linker and libbfd code. The size of name and desc must @@ -490,6 +477,7 @@ class ElfBuilder FINAL { digest_start_ = this->Seek(0, kSeekCurrent); static_assert(kBuildIdLen % 4 == 0, "expecting a mutliple of 4 for build ID length"); this->WriteFully(std::string(kBuildIdLen, '\0').c_str(), kBuildIdLen); // desc. + DCHECK_EQ(this->GetPosition(), GetSize()); } off_t GetDigestStart() { @@ -530,6 +518,7 @@ class ElfBuilder FINAL { abiflags_(this, ".MIPS.abiflags", SHT_MIPS_ABIFLAGS, SHF_ALLOC, nullptr, 0, kPageSize, 0, isa, features), build_id_(this, ".note.gnu.build-id", SHT_NOTE, SHF_ALLOC, nullptr, 0, 4, 0), + current_section_(nullptr), started_(false), write_program_headers_(false), loaded_size_(0u), @@ -545,6 +534,7 @@ class ElfBuilder FINAL { ~ElfBuilder() {} InstructionSet GetIsa() { return isa_; } + BuildIdSection* GetBuildId() { return &build_id_; } Section* GetRoData() { return &rodata_; } Section* GetText() { return &text_; } Section* GetBss() { return &bss_; } @@ -622,6 +612,9 @@ class ElfBuilder FINAL { if (section->link_ != nullptr) { section->header_.sh_link = section->link_->GetSectionIndex(); } + if (section->header_.sh_offset == 0) { + section->header_.sh_type = SHT_NOBITS; + } } shstrtab_.End(); @@ -680,65 +673,57 @@ class ElfBuilder FINAL { soname = soname.substr(directory_separator_pos + 1); } - // Calculate addresses of .text, .bss and .dynstr. - DCHECK_EQ(rodata_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize)); - DCHECK_EQ(text_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize)); - DCHECK_EQ(bss_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize)); - DCHECK_EQ(dynstr_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize)); - Elf_Word rodata_address = rodata_.GetAddress(); - Elf_Word text_address = RoundUp(rodata_address + rodata_size, kPageSize); - Elf_Word bss_address = RoundUp(text_address + text_size, kPageSize); - Elf_Word abiflags_address = RoundUp(bss_address + bss_size, kPageSize); - Elf_Word abiflags_size = 0; + // Allocate all pre-dynamic sections. 
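Taken together, the elf_builder.h hunks above replace the implicit Start()/End() layout with an explicit section lifecycle. A sketch of the new usage under the API shown in this diff (the 'code' buffer is a placeholder):

    builder->GetText()->AllocateVirtualMemory(text_size);  // Reserve the SHF_ALLOC address range up front.
    builder->GetText()->Start();                           // Begin writing file data.
    builder->GetText()->WriteFully(code.data(), code.size());
    size_t written = builder->GetText()->GetPosition();    // Bytes written so far (valid only while writing).
    builder->GetText()->End();                             // Finish; sh_size is set/checked here.
    // A section that is allocated but never Start()-ed (e.g. .bss) ends up as SHT_NOBITS.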
+ rodata_.AllocateVirtualMemory(rodata_size); + text_.AllocateVirtualMemory(text_size); + if (bss_size != 0) { + bss_.AllocateVirtualMemory(bss_size); + } if (isa_ == InstructionSet::kMips || isa_ == InstructionSet::kMips64) { - abiflags_size = abiflags_.GetSize(); + abiflags_.AllocateVirtualMemory(abiflags_.GetSize()); } - Elf_Word dynstr_address = RoundUp(abiflags_address + abiflags_size, kPageSize); // Cache .dynstr, .dynsym and .hash data. dynstr_.Add(""); // dynstr should start with empty string. - Elf_Word rodata_index = rodata_.GetSectionIndex(); Elf_Word oatdata = dynstr_.Add("oatdata"); - dynsym_.Add(oatdata, rodata_index, rodata_address, rodata_size, STB_GLOBAL, STT_OBJECT); + dynsym_.Add(oatdata, &rodata_, rodata_.GetAddress(), rodata_size, STB_GLOBAL, STT_OBJECT); if (text_size != 0u) { - Elf_Word text_index = rodata_index + 1u; Elf_Word oatexec = dynstr_.Add("oatexec"); - dynsym_.Add(oatexec, text_index, text_address, text_size, STB_GLOBAL, STT_OBJECT); + dynsym_.Add(oatexec, &text_, text_.GetAddress(), text_size, STB_GLOBAL, STT_OBJECT); Elf_Word oatlastword = dynstr_.Add("oatlastword"); - Elf_Word oatlastword_address = text_address + text_size - 4; - dynsym_.Add(oatlastword, text_index, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT); + Elf_Word oatlastword_address = text_.GetAddress() + text_size - 4; + dynsym_.Add(oatlastword, &text_, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT); } else if (rodata_size != 0) { // rodata_ can be size 0 for dwarf_test. Elf_Word oatlastword = dynstr_.Add("oatlastword"); - Elf_Word oatlastword_address = rodata_address + rodata_size - 4; - dynsym_.Add(oatlastword, rodata_index, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT); + Elf_Word oatlastword_address = rodata_.GetAddress() + rodata_size - 4; + dynsym_.Add(oatlastword, &rodata_, oatlastword_address, 4, STB_GLOBAL, STT_OBJECT); } DCHECK_LE(bss_roots_offset, bss_size); if (bss_size != 0u) { - Elf_Word bss_index = rodata_index + 1u + (text_size != 0 ? 1u : 0u); Elf_Word oatbss = dynstr_.Add("oatbss"); - dynsym_.Add(oatbss, bss_index, bss_address, bss_roots_offset, STB_GLOBAL, STT_OBJECT); + dynsym_.Add(oatbss, &bss_, bss_.GetAddress(), bss_roots_offset, STB_GLOBAL, STT_OBJECT); DCHECK_LE(bss_methods_offset, bss_roots_offset); DCHECK_LE(bss_roots_offset, bss_size); // Add a symbol marking the start of the methods part of the .bss, if not empty. if (bss_methods_offset != bss_roots_offset) { - Elf_Word bss_methods_address = bss_address + bss_methods_offset; + Elf_Word bss_methods_address = bss_.GetAddress() + bss_methods_offset; Elf_Word bss_methods_size = bss_roots_offset - bss_methods_offset; Elf_Word oatbssroots = dynstr_.Add("oatbssmethods"); dynsym_.Add( - oatbssroots, bss_index, bss_methods_address, bss_methods_size, STB_GLOBAL, STT_OBJECT); + oatbssroots, &bss_, bss_methods_address, bss_methods_size, STB_GLOBAL, STT_OBJECT); } // Add a symbol marking the start of the GC roots part of the .bss, if not empty. 
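For reference, the oat symbols emitted by the dynsym_.Add() calls in this hunk, with addresses now taken from the sections' allocated layout rather than recomputed by hand (ranges reconstructed from the sizes passed above):

    //   oatdata        -> [rodata_.GetAddress(), +rodata_size)
    //   oatexec        -> [text_.GetAddress(),   +text_size)        (only if text_size != 0)
    //   oatlastword    -> last 4 bytes of .text (.rodata when there is no .text)
    //   oatbss         -> [bss_.GetAddress(),    +bss_roots_offset)
    //   oatbssmethods  -> [bss_ + bss_methods_offset, bss_ + bss_roots_offset)
    //   oatbssroots    -> [bss_ + bss_roots_offset,   bss_ + bss_size)
    //   oatbsslastword -> last 4 bytes of .bss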
if (bss_roots_offset != bss_size) { - Elf_Word bss_roots_address = bss_address + bss_roots_offset; + Elf_Word bss_roots_address = bss_.GetAddress() + bss_roots_offset; Elf_Word bss_roots_size = bss_size - bss_roots_offset; Elf_Word oatbssroots = dynstr_.Add("oatbssroots"); dynsym_.Add( - oatbssroots, bss_index, bss_roots_address, bss_roots_size, STB_GLOBAL, STT_OBJECT); + oatbssroots, &bss_, bss_roots_address, bss_roots_size, STB_GLOBAL, STT_OBJECT); } Elf_Word oatbsslastword = dynstr_.Add("oatbsslastword"); - Elf_Word bsslastword_address = bss_address + bss_size - 4; - dynsym_.Add(oatbsslastword, bss_index, bsslastword_address, 4, STB_GLOBAL, STT_OBJECT); + Elf_Word bsslastword_address = bss_.GetAddress() + bss_size - 4; + dynsym_.Add(oatbsslastword, &bss_, bsslastword_address, 4, STB_GLOBAL, STT_OBJECT); } Elf_Word soname_offset = dynstr_.Add(soname); @@ -759,28 +744,24 @@ class ElfBuilder FINAL { hash.push_back(0); // Last symbol terminates the chain. hash_.Add(hash.data(), hash.size() * sizeof(hash[0])); - // Calculate addresses of .dynsym, .hash and .dynamic. - DCHECK_EQ(dynstr_.header_.sh_flags, dynsym_.header_.sh_flags); - DCHECK_EQ(dynsym_.header_.sh_flags, hash_.header_.sh_flags); - Elf_Word dynsym_address = - RoundUp(dynstr_address + dynstr_.GetCacheSize(), dynsym_.header_.sh_addralign); - Elf_Word hash_address = - RoundUp(dynsym_address + dynsym_.GetCacheSize(), hash_.header_.sh_addralign); - DCHECK_EQ(dynamic_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize)); - Elf_Word dynamic_address = RoundUp(hash_address + dynsym_.GetCacheSize(), kPageSize); + // Allocate all remaining sections. + dynstr_.AllocateVirtualMemory(dynstr_.GetCacheSize()); + dynsym_.AllocateVirtualMemory(dynsym_.GetCacheSize()); + hash_.AllocateVirtualMemory(hash_.GetCacheSize()); Elf_Dyn dyns[] = { - { DT_HASH, { hash_address } }, - { DT_STRTAB, { dynstr_address } }, - { DT_SYMTAB, { dynsym_address } }, + { DT_HASH, { hash_.GetAddress() } }, + { DT_STRTAB, { dynstr_.GetAddress() } }, + { DT_SYMTAB, { dynsym_.GetAddress() } }, { DT_SYMENT, { sizeof(Elf_Sym) } }, { DT_STRSZ, { dynstr_.GetCacheSize() } }, { DT_SONAME, { soname_offset } }, { DT_NULL, { 0 } }, }; dynamic_.Add(&dyns, sizeof(dyns)); + dynamic_.AllocateVirtualMemory(dynamic_.GetCacheSize()); - loaded_size_ = RoundUp(dynamic_address + dynamic_.GetCacheSize(), kPageSize); + loaded_size_ = RoundUp(virtual_address_, kPageSize); } void WriteDynamicSection() { @@ -788,8 +769,6 @@ class ElfBuilder FINAL { dynsym_.WriteCachedSection(); hash_.WriteCachedSection(); dynamic_.WriteCachedSection(); - - CHECK_EQ(loaded_size_, RoundUp(dynamic_.GetAddress() + dynamic_.GetSize(), kPageSize)); } Elf_Word GetLoadedSize() { @@ -828,10 +807,6 @@ class ElfBuilder FINAL { return stream_.Seek(RoundUp(stream_.Seek(0, kSeekCurrent), alignment), kSeekSet); } - Elf_Addr AlignVirtualAddress(size_t alignment) { - return virtual_address_ = RoundUp(virtual_address_, alignment); - } - private: static Elf_Ehdr MakeElfHeader(InstructionSet isa, const InstructionSetFeatures* features) { Elf_Ehdr elf_header = Elf_Ehdr(); @@ -902,7 +877,6 @@ class ElfBuilder FINAL { elf_header.e_ehsize = sizeof(Elf_Ehdr); elf_header.e_phentsize = sizeof(Elf_Phdr); elf_header.e_shentsize = sizeof(Elf_Shdr); - elf_header.e_phoff = sizeof(Elf_Ehdr); return elf_header; } @@ -933,6 +907,7 @@ class ElfBuilder FINAL { for (auto* section : sections_) { const Elf_Shdr& shdr = section->header_; if ((shdr.sh_flags & SHF_ALLOC) != 0 && shdr.sh_size != 0) { + DCHECK(shdr.sh_addr != 0u) << "Allocate virtual 
memory for the section"; // PT_LOAD tells the linker to mmap part of the file. // The linker can only mmap page-aligned sections. // Single PT_LOAD may contain several ELF sections. @@ -1010,6 +985,7 @@ class ElfBuilder FINAL { // List of used section in the order in which they were written. std::vector<Section*> sections_; + Section* current_section_; // The section which is currently being written. bool started_; bool write_program_headers_; diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index a175c21760..8750910fe1 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -294,7 +294,7 @@ static dex::TypeIndex FindClassIndexIn(mirror::Class* cls, // as there may be different class loaders. So only return the index if it's // the right class already resolved with the class loader. if (index.IsValid()) { - ObjPtr<mirror::Class> resolved = ClassLinker::LookupResolvedType( + ObjPtr<mirror::Class> resolved = compilation_unit.GetClassLinker()->LookupResolvedType( index, compilation_unit.GetDexCache().Get(), compilation_unit.GetClassLoader().Get()); if (resolved != cls) { index = dex::TypeIndex::Invalid(); @@ -682,7 +682,7 @@ HInliner::InlineCacheType HInliner::ExtractClassesFromOfflineProfile( << "is invalid in location" << dex_cache->GetDexFile()->GetLocation(); return kInlineCacheNoData; } - ObjPtr<mirror::Class> clazz = ClassLinker::LookupResolvedType( + ObjPtr<mirror::Class> clazz = caller_compilation_unit_.GetClassLinker()->LookupResolvedType( class_ref.type_index, dex_cache, caller_compilation_unit_.GetClassLoader().Get()); diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc index bce4de32d5..e36d91fb05 100644 --- a/compiler/optimizing/instruction_builder.cc +++ b/compiler/optimizing/instruction_builder.cc @@ -442,17 +442,15 @@ ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() { return false; } }; - const uint32_t num_instructions = code_item_->insns_size_in_code_units_; + CodeItemDebugInfoAccessor accessor(dex_file_, code_item_); ArenaBitVector* locations = ArenaBitVector::Create(local_allocator_, - num_instructions, + accessor.InsnsSizeInCodeUnits(), /* expandable */ false, kArenaAllocGraphBuilder); locations->ClearAllBits(); - uint32_t debug_info_offset = OatFile::GetDebugInfoOffset(*dex_file_, code_item_); - dex_file_->DecodeDebugPositionInfo(code_item_, debug_info_offset, Callback::Position, locations); + dex_file_->DecodeDebugPositionInfo(accessor.DebugInfoOffset(), Callback::Position, locations); // Instruction-specific tweaks. - IterationRange<DexInstructionIterator> instructions = code_item_->Instructions(); - for (const DexInstructionPcPair& inst : instructions) { + for (const DexInstructionPcPair& inst : accessor) { switch (inst->Opcode()) { case Instruction::MOVE_EXCEPTION: { // Stop in native debugger after the exception has been moved. 
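FindNativeDebugInfoLocations() above switches to CodeItemDebugInfoAccessor, which is directly iterable over the code item. The iteration pattern, condensed from this hunk:

    CodeItemDebugInfoAccessor accessor(dex_file_, code_item_);
    dex_file_->DecodeDebugPositionInfo(accessor.DebugInfoOffset(), Callback::Position, locations);
    for (const DexInstructionPcPair& inst : accessor) {    // (dex_pc, instruction) pairs.
      if (inst->Opcode() == Instruction::MOVE_EXCEPTION) {
        // Per-opcode tweaks keyed on inst.DexPc(), as in the case that follows.
      }
    }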
@@ -461,7 +459,7 @@ ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() { locations->ClearBit(inst.DexPc()); DexInstructionIterator next = std::next(DexInstructionIterator(inst)); DCHECK(next.DexPc() != inst.DexPc()); - if (next != instructions.end()) { + if (next != accessor.end()) { locations->SetBit(next.DexPc()); } break; @@ -796,7 +794,6 @@ ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType in ArtMethod* resolved_method = class_linker->ResolveMethod<ClassLinker::ResolveMode::kCheckICCEAndIAE>( - *dex_compilation_unit_->GetDexFile(), method_idx, dex_compilation_unit_->GetDexCache(), class_loader, @@ -831,7 +828,6 @@ ArtMethod* HInstructionBuilder::ResolveMethod(uint16_t method_idx, InvokeType in return nullptr; } ObjPtr<mirror::Class> referenced_class = class_linker->LookupResolvedType( - *dex_compilation_unit_->GetDexFile(), dex_compilation_unit_->GetDexFile()->GetMethodId(method_idx).class_idx_, dex_compilation_unit_->GetDexCache().Get(), class_loader.Get()); @@ -1425,7 +1421,7 @@ bool HInstructionBuilder::BuildInstanceFieldAccess(const Instruction& instructio } static ObjPtr<mirror::Class> GetClassFrom(CompilerDriver* driver, - const DexCompilationUnit& compilation_unit) { + const DexCompilationUnit& compilation_unit) { ScopedObjectAccess soa(Thread::Current()); Handle<mirror::ClassLoader> class_loader = compilation_unit.GetClassLoader(); Handle<mirror::DexCache> dex_cache = compilation_unit.GetDexCache(); @@ -2934,7 +2930,7 @@ bool HInstructionBuilder::ProcessDexInstruction(const Instruction& instruction, ObjPtr<mirror::Class> HInstructionBuilder::LookupResolvedType( dex::TypeIndex type_index, const DexCompilationUnit& compilation_unit) const { - return ClassLinker::LookupResolvedType( + return compilation_unit.GetClassLinker()->LookupResolvedType( type_index, compilation_unit.GetDexCache().Get(), compilation_unit.GetClassLoader().Get()); } diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc index a42a85dc1d..53e449bbbe 100644 --- a/compiler/optimizing/instruction_simplifier.cc +++ b/compiler/optimizing/instruction_simplifier.cc @@ -994,6 +994,27 @@ void InstructionSimplifierVisitor::VisitIf(HIf* instruction) { instruction->GetBlock()->SwapSuccessors(); RecordSimplification(); } + HInstruction* input = instruction->InputAt(0); + + // If a condition 'cond' is evaluated in an HIf instruction then in the successors of the + // IF_BLOCK we statically know the value of the condition (TRUE in TRUE_SUCC, FALSE in + // FALSE_SUCC). Using that we can replace another evaluation (use) EVAL of the same 'cond' + // with TRUE value (FALSE value) if every path from the ENTRY_BLOCK to EVAL_BLOCK contains the + // edge HIF_BLOCK->TRUE_SUCC (HIF_BLOCK->FALSE_SUCC). 
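A worked example of the rule described in the comment above (the source form is only for illustration; the pass rewrites the HGraph): inside a successor of HIf(cond) the value of cond is statically known, so its dominated uses fold to constants.

    int Example(int x, int a, int b) {
      bool c = (x > 0);
      if (c) {
        return c ? a : b;   // 'c' is known true here  -> simplifies to 'return a;'
      } else {
        return c ? a : b;   // 'c' is known false here -> simplifies to 'return b;'
      }
    }

The replacement in the hunk below uses the non-strict Dominates() variant introduced in nodes.cc/nodes.h further down, presumably so that a use located at the successor's first instruction itself is also covered.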
+ if (!input->IsConstant()) { + HBasicBlock* true_succ = instruction->IfTrueSuccessor(); + HBasicBlock* false_succ = instruction->IfFalseSuccessor(); + + DCHECK_EQ(true_succ->GetPredecessors().size(), 1u); + input->ReplaceUsesDominatedBy( + true_succ->GetFirstInstruction(), GetGraph()->GetIntConstant(1), /* strictly */ false); + RecordSimplification(); + + DCHECK_EQ(false_succ->GetPredecessors().size(), 1u); + input->ReplaceUsesDominatedBy( + false_succ->GetFirstInstruction(), GetGraph()->GetIntConstant(0), /* strictly */ false); + RecordSimplification(); + } } void InstructionSimplifierVisitor::VisitArrayLength(HArrayLength* instruction) { diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index 5f33ed6303..d39c2aded5 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -1111,10 +1111,10 @@ bool HInstructionList::FoundBefore(const HInstruction* instruction1, return true; } -bool HInstruction::StrictlyDominates(HInstruction* other_instruction) const { +bool HInstruction::Dominates(HInstruction* other_instruction, bool strictly) const { if (other_instruction == this) { // An instruction does not strictly dominate itself. - return false; + return !strictly; } HBasicBlock* block = GetBlock(); HBasicBlock* other_block = other_instruction->GetBlock(); @@ -1148,6 +1148,10 @@ bool HInstruction::StrictlyDominates(HInstruction* other_instruction) const { } } +bool HInstruction::StrictlyDominates(HInstruction* other_instruction) const { + return Dominates(other_instruction, /* strictly */ true); +} + void HInstruction::RemoveEnvironment() { RemoveEnvironmentUses(this); environment_ = nullptr; @@ -1170,14 +1174,16 @@ void HInstruction::ReplaceWith(HInstruction* other) { DCHECK(env_uses_.empty()); } -void HInstruction::ReplaceUsesDominatedBy(HInstruction* dominator, HInstruction* replacement) { +void HInstruction::ReplaceUsesDominatedBy(HInstruction* dominator, + HInstruction* replacement, + bool strictly) { const HUseList<HInstruction*>& uses = GetUses(); for (auto it = uses.begin(), end = uses.end(); it != end; /* ++it below */) { HInstruction* user = it->GetUser(); size_t index = it->GetIndex(); // Increment `it` now because `*it` may disappear thanks to user->ReplaceInput(). ++it; - if (dominator->StrictlyDominates(user)) { + if (dominator->Dominates(user, strictly)) { user->ReplaceInput(replacement, index); } } diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index 42a9d95b9a..affd54ed72 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -2098,9 +2098,13 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { return IsRemovable() && !HasUses(); } - // Does this instruction strictly dominate `other_instruction`? - // Returns false if this instruction and `other_instruction` are the same. + // Does this instruction dominate (strictly or in regular sense depending on 'strictly') + // `other_instruction`? + // Returns '!strictly' if this instruction and `other_instruction` are the same. // Aborts if this instruction and `other_instruction` are both phis. + bool Dominates(HInstruction* other_instruction, bool strictly) const; + + // Return 'Dominates(other_instruction, /*strictly*/ true)'. 
bool StrictlyDominates(HInstruction* other_instruction) const; int GetId() const { return id_; } @@ -2161,7 +2165,13 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> { void SetLocations(LocationSummary* locations) { locations_ = locations; } void ReplaceWith(HInstruction* instruction); - void ReplaceUsesDominatedBy(HInstruction* dominator, HInstruction* replacement); + + // Replace all uses of the instruction which are dominated by 'dominator' with 'replacement'. + // 'strictly' determines whether strict or regular domination relation should be checked. + void ReplaceUsesDominatedBy(HInstruction* dominator, + HInstruction* replacement, + bool strictly = true); + void ReplaceInput(HInstruction* replacement, size_t index); // This is almost the same as doing `ReplaceWith()`. But in this helper, the diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h index 096349fd73..87dff8403b 100644 --- a/compiler/optimizing/nodes_vector.h +++ b/compiler/optimizing/nodes_vector.h @@ -109,6 +109,16 @@ class HVecOperation : public HVariableInputSizeInstruction { // Assumes vector nodes cannot be moved by default. Each concrete implementation // that can be moved should override this method and return true. + // + // Note: similar approach is used for instruction scheduling (if it is turned on for the target): + // by default HScheduler::IsSchedulable returns false for a particular HVecOperation. + // HScheduler${ARCH}::IsSchedulable can be overridden to return true for an instruction (see + // scheduler_arm64.h for example) if it is safe to schedule it; in this case one *must* also + // look at/update HScheduler${ARCH}::IsSchedulingBarrier for this instruction. + // + // Note: For newly introduced vector instructions HScheduler${ARCH}::IsSchedulingBarrier must be + // altered to return true if the instruction might reside outside the SIMD loop body since SIMD + // registers are not kept alive across vector loop boundaries (yet). bool CanBeMoved() const OVERRIDE { return false; } // Tests if all data of a vector node (vector length and packed type) is equal. diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index 73c72fc57a..24b1a123ee 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -1224,7 +1224,7 @@ bool OptimizingCompiler::JitCompile(Thread* self, } const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions(); - if (compiler_options.GetGenerateDebugInfo()) { + if (compiler_options.GenerateAnyDebugInfo()) { const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code); const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode()); debug::MethodDebugInfo info = {}; @@ -1244,10 +1244,13 @@ bool OptimizingCompiler::JitCompile(Thread* self, info.frame_size_in_bytes = method_header->GetFrameSizeInBytes(); info.code_info = nullptr; info.cfi = jni_compiled_method.GetCfi(); - std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForMethods( + // If both flags are passed, generate full debug info. 
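The JIT hunks in optimizing_compiler.cc (this one and the second JitCompile() occurrence below) share the same selection logic, sketched here with the names used in this diff: --generate-debug-info produces full debug info and wins when both flags are set; otherwise mini debug info is emitted.

    const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
    if (compiler_options.GenerateAnyDebugInfo()) {
      // Full debug info wins if both flags are passed.
      const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo();
      std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT(
          GetCompilerDriver()->GetInstructionSet(),
          GetCompilerDriver()->GetInstructionSetFeatures(),
          mini_debug_info,
          info);
      CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
    }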
+ const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo(); + std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT( GetCompilerDriver()->GetInstructionSet(), GetCompilerDriver()->GetInstructionSetFeatures(), - ArrayRef<const debug::MethodDebugInfo>(&info, 1)); + mini_debug_info, + info); CreateJITCodeEntryForAddress(code_address, std::move(elf_file)); } @@ -1352,7 +1355,7 @@ bool OptimizingCompiler::JitCompile(Thread* self, } const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions(); - if (compiler_options.GetGenerateDebugInfo()) { + if (compiler_options.GenerateAnyDebugInfo()) { const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code); const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode()); debug::MethodDebugInfo info = {}; @@ -1372,10 +1375,13 @@ bool OptimizingCompiler::JitCompile(Thread* self, info.frame_size_in_bytes = method_header->GetFrameSizeInBytes(); info.code_info = stack_map_size == 0 ? nullptr : stack_map_data; info.cfi = ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()); - std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForMethods( + // If both flags are passed, generate full debug info. + const bool mini_debug_info = !compiler_options.GetGenerateDebugInfo(); + std::vector<uint8_t> elf_file = debug::MakeElfFileForJIT( GetCompilerDriver()->GetInstructionSet(), GetCompilerDriver()->GetInstructionSetFeatures(), - ArrayRef<const debug::MethodDebugInfo>(&info, 1)); + mini_debug_info, + info); CreateJITCodeEntryForAddress(code_address, std::move(elf_file)); } diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc index d84f14acc0..8bb124e066 100644 --- a/compiler/optimizing/reference_type_propagation.cc +++ b/compiler/optimizing/reference_type_propagation.cc @@ -544,7 +544,7 @@ void ReferenceTypePropagation::RTPVisitor::SetClassAsTypeInfo(HInstruction* inst // the method is from the String class, the null loader is good enough. Handle<mirror::ClassLoader> loader(hs.NewHandle<mirror::ClassLoader>(nullptr)); ArtMethod* method = cl->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>( - dex_file, invoke->GetDexMethodIndex(), dex_cache, loader, nullptr, kDirect); + invoke->GetDexMethodIndex(), dex_cache, loader, /* referrer */ nullptr, kDirect); DCHECK(method != nullptr); mirror::Class* declaring_class = method->GetDeclaringClass(); DCHECK(declaring_class != nullptr); @@ -576,8 +576,8 @@ void ReferenceTypePropagation::RTPVisitor::UpdateReferenceTypeInfo(HInstruction* ScopedObjectAccess soa(Thread::Current()); ObjPtr<mirror::DexCache> dex_cache = FindDexCacheWithHint(soa.Self(), dex_file, hint_dex_cache_); - ObjPtr<mirror::Class> klass = - ClassLinker::LookupResolvedType(type_idx, dex_cache, class_loader_.Get()); + ObjPtr<mirror::Class> klass = Runtime::Current()->GetClassLinker()->LookupResolvedType( + type_idx, dex_cache, class_loader_.Get()); SetClassAsTypeInfo(instr, klass, is_exact); } diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h index bb7c353bc2..dfa077f7de 100644 --- a/compiler/optimizing/scheduler.h +++ b/compiler/optimizing/scheduler.h @@ -462,6 +462,11 @@ class HScheduler { // containing basic block from being scheduled. // This method is used to restrict scheduling to instructions that we know are // safe to handle. + // + // For newly introduced instructions by default HScheduler::IsSchedulable returns false. 
+ // HScheduler${ARCH}::IsSchedulable can be overridden to return true for an instruction (see + // scheduler_arm64.h for example) if it is safe to schedule it; in this case one *must* also + // look at/update HScheduler${ARCH}::IsSchedulingBarrier for this instruction. virtual bool IsSchedulable(const HInstruction* instruction) const; bool IsSchedulable(const HBasicBlock* block) const; diff --git a/compiler/optimizing/scheduler_arm64.h b/compiler/optimizing/scheduler_arm64.h index 32f161f26a..f71cb5b784 100644 --- a/compiler/optimizing/scheduler_arm64.h +++ b/compiler/optimizing/scheduler_arm64.h @@ -151,6 +151,20 @@ class HSchedulerARM64 : public HScheduler { #undef CASE_INSTRUCTION_KIND } + // Treat as scheduling barriers those vector instructions whose live ranges exceed the vectorized + // loop boundaries. This is a workaround for the lack of notion of SIMD register in the compiler; + // around a call we have to save/restore all live SIMD&FP registers (only lower 64 bits of + // SIMD&FP registers are callee saved) so don't reorder such vector instructions. + // + // TODO: remove this when a proper support of SIMD registers is introduced to the compiler. + bool IsSchedulingBarrier(const HInstruction* instr) const OVERRIDE { + return HScheduler::IsSchedulingBarrier(instr) || + instr->IsVecReduce() || + instr->IsVecExtractScalar() || + instr->IsVecSetScalars() || + instr->IsVecReplicateScalar(); + } + private: SchedulingLatencyVisitorARM64 arm64_latency_visitor_; DISALLOW_COPY_AND_ASSIGN(HSchedulerARM64); diff --git a/compiler/verifier_deps_test.cc b/compiler/verifier_deps_test.cc index 4709fd0e9e..ee1d7c69bc 100644 --- a/compiler/verifier_deps_test.cc +++ b/compiler/verifier_deps_test.cc @@ -158,11 +158,10 @@ class VerifierDepsTest : public CommonCompilerTest { while (it.HasNextDirectMethod()) { ArtMethod* resolved_method = class_linker_->ResolveMethod<ClassLinker::ResolveMode::kNoChecks>( - *primary_dex_file_, it.GetMemberIndex(), dex_cache_handle, class_loader_handle, - nullptr, + /* referrer */ nullptr, it.GetMethodInvokeType(*class_def)); CHECK(resolved_method != nullptr); if (method_name == resolved_method->GetName()) { |
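Restating the arm64 scheduler change above without diff markers, since it encodes the invariant described in the scheduler.h/nodes_vector.h notes: vector instructions whose values may live across the SIMD loop body (or across calls, where only the low 64 bits of SIMD&FP registers are callee-saved) must not be reordered, so they are treated as scheduling barriers.

    // In class HSchedulerARM64 (scheduler_arm64.h), as added by this change:
    bool IsSchedulingBarrier(const HInstruction* instr) const OVERRIDE {
      return HScheduler::IsSchedulingBarrier(instr) ||
             instr->IsVecReduce() ||
             instr->IsVecExtractScalar() ||
             instr->IsVecSetScalars() ||
             instr->IsVecReplicateScalar();
    }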