-rw-r--r--  compiler/optimizing/code_generator.cc           |   8
-rw-r--r--  compiler/optimizing/code_generator.h            |  13
-rw-r--r--  compiler/optimizing/code_generator_arm.cc       |  64
-rw-r--r--  compiler/optimizing/code_generator_arm.h        |  13
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc     |  60
-rw-r--r--  compiler/optimizing/code_generator_arm64.h      |  13
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc  |   4
-rw-r--r--  compiler/optimizing/code_generator_mips.cc      |  17
-rw-r--r--  compiler/optimizing/code_generator_x86.cc       |  58
-rw-r--r--  compiler/optimizing/code_generator_x86.h        |   8
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc    |  72
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h     |   9
-rw-r--r--  compiler/optimizing/inliner.cc                  |   2
-rw-r--r--  compiler/optimizing/nodes.cc                    |   4
-rw-r--r--  compiler/optimizing/nodes.h                     |  15
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc      | 144
-rw-r--r--  compiler/optimizing/sharpening.cc               |  17
-rw-r--r--  compiler/optimizing/sharpening.h                |   7
-rw-r--r--  runtime/jit/jit_code_cache.cc                   |  82
19 files changed, 253 insertions, 357 deletions
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 402eeee65f..fa6a5225e7 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1402,14 +1402,6 @@ void CodeGenerator::EmitJitRoots(uint8_t* code,
     entry.second = index;
     ++index;
   }
-  for (auto& entry : jit_class_roots_) {
-    // Update the `roots` with the class, and replace the address temporarily
-    // stored to the index in the table.
-    uint64_t address = entry.second;
-    roots->Set(index, reinterpret_cast<StackReference<mirror::Class>*>(address)->AsMirrorPtr());
-    entry.second = index;
-    ++index;
-  }
   EmitJitRootPatches(code, roots_data);
 }
 
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 2e2c3c00af..4b11e7c699 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -34,7 +34,6 @@
 #include "stack_map_stream.h"
 #include "string_reference.h"
 #include "utils/label.h"
-#include "utils/type_reference.h"
 
 namespace art {
 
@@ -344,7 +343,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
   void BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item);
   size_t ComputeStackMapsSize();
   size_t GetNumberOfJitRoots() const {
-    return jit_string_roots_.size() + jit_class_roots_.size();
+    return jit_string_roots_.size();
   }
 
   // Fills the `literals` array with literals collected during code generation.
@@ -612,8 +611,6 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
         block_order_(nullptr),
         jit_string_roots_(StringReferenceValueComparator(),
                           graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-        jit_class_roots_(TypeReferenceValueComparator(),
-                         graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
         disasm_info_(nullptr),
         stats_(stats),
         graph_(graph),
@@ -684,7 +681,6 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
   virtual void EmitJitRootPatches(uint8_t* code ATTRIBUTE_UNUSED,
                                   const uint8_t* roots_data ATTRIBUTE_UNUSED) {
     DCHECK_EQ(jit_string_roots_.size(), 0u);
-    DCHECK_EQ(jit_class_roots_.size(), 0u);
   }
 
   // Frame size required for this method.
@@ -715,12 +711,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
   // Maps a StringReference (dex_file, string_index) to the index in the literal table.
   // Entries are initially added with a 0 index, and `EmitJitRoots` will compute all the
   // indices.
-  ArenaSafeMap<StringReference, uint32_t, StringReferenceValueComparator> jit_string_roots_;
-
-  // Maps a ClassReference (dex_file, type_index) to the index in the literal table.
-  // Entries are initially added with a pointer in the handle zone, and `EmitJitRoots`
-  // will compute all the indices.
-  ArenaSafeMap<TypeReference, uint64_t, TypeReferenceValueComparator> jit_class_roots_;
+  ArenaSafeMap<StringReference, size_t, StringReferenceValueComparator> jit_string_roots_;
 
   DisassemblyInformation* disasm_info_;
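Every remaining backend implements EmitJitRootPatches with the same address computation: the 4-byte literal in the generated code is rewritten to point at the string's slot in the method's JIT root table. A minimal sketch of that computation, assuming 4-byte GcRoot entries; the free-standing helper and its name are illustrative, not ART API:

    #include <cstdint>
    #include <cstring>

    // Sketch: point a 4-byte literal at slot `index_in_table` of the JIT root table.
    // `roots_data` is the start of the table; each entry is a 4-byte compressed
    // reference, matching sizeof(GcRoot<mirror::Object>) in the hunks below.
    void PatchRootLiteral(uint8_t* code,
                          uint32_t literal_offset,
                          const uint8_t* roots_data,
                          size_t index_in_table) {
      uintptr_t address = reinterpret_cast<uintptr_t>(roots_data) + index_in_table * 4u;
      uint32_t value = static_cast<uint32_t>(address);
      std::memcpy(code + literal_offset, &value, sizeof(value));  // literal may be unaligned
    }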
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 866f2c58d8..ed6eef1b55 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -1216,9 +1216,7 @@ CodeGeneratorARM::CodeGeneratorARM(HGraph* graph,
       boot_image_address_patches_(std::less<uint32_t>(),
                                   graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       jit_string_patches_(StringReferenceValueComparator(),
-                          graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      jit_class_patches_(TypeReferenceValueComparator(),
-                         graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+                          graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
   // Always save the LR register to mimic Quick.
   AddAllocatedRegister(Location::RegisterLocation(LR));
 }
@@ -5714,7 +5712,8 @@ HLoadClass::LoadKind CodeGeneratorARM::GetSupportedLoadClassKind(
       break;
     case HLoadClass::LoadKind::kBootImageAddress:
       break;
-    case HLoadClass::LoadKind::kJitTableAddress:
+    case HLoadClass::LoadKind::kDexCacheAddress:
+      DCHECK(Runtime::Current()->UseJitCompilation());
       break;
     case HLoadClass::LoadKind::kDexCachePcRelative:
       DCHECK(!Runtime::Current()->UseJitCompilation());
@@ -5815,12 +5814,22 @@ void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
       __ LoadLiteral(out, codegen_->DeduplicateBootImageAddressLiteral(address));
       break;
     }
-    case HLoadClass::LoadKind::kJitTableAddress: {
-      __ LoadLiteral(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
-                                                               cls->GetTypeIndex(),
-                                                               cls->GetAddress()));
-      // /* GcRoot<mirror::Class> */ out = *out
-      GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
+    case HLoadClass::LoadKind::kDexCacheAddress: {
+      DCHECK_NE(cls->GetAddress(), 0u);
+      uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+      // 16-bit LDR immediate has a 5-bit offset multiplied by the size and that gives
+      // a 128B range. To try and reduce the number of literals if we load multiple types,
+      // simply split the dex cache address to a 128B aligned base loaded from a literal
+      // and the remaining offset embedded in the load.
+      static_assert(sizeof(GcRoot<mirror::Class>) == 4u, "Expected GC root to be 4 bytes.");
+      DCHECK_ALIGNED(cls->GetAddress(), 4u);
+      constexpr size_t offset_bits = /* encoded bits */ 5 + /* scale */ 2;
+      uint32_t base_address = address & ~MaxInt<uint32_t>(offset_bits);
+      uint32_t offset = address & MaxInt<uint32_t>(offset_bits);
+      __ LoadLiteral(out, codegen_->DeduplicateDexCacheAddressLiteral(base_address));
+      // /* GcRoot<mirror::Class> */ out = *(base_address + offset)
+      GenerateGcRootFieldLoad(cls, out_loc, out, offset, read_barrier_option);
+      generate_null_check = !cls->IsInDexCache();
       break;
     }
     case HLoadClass::LoadKind::kDexCachePcRelative: {
@@ -7382,15 +7391,6 @@ Literal* CodeGeneratorARM::DeduplicateJitStringLiteral(const DexFile& dex_file,
       [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
 }
 
-Literal* CodeGeneratorARM::DeduplicateJitClassLiteral(const DexFile& dex_file,
-                                                      dex::TypeIndex type_index,
-                                                      uint64_t address) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
-  return jit_class_patches_.GetOrCreate(
-      TypeReference(&dex_file, type_index),
-      [this]() { return __ NewLiteral<uint32_t>(/* placeholder */ 0u); });
-}
-
 template <LinkerPatch (*Factory)(size_t, const DexFile*, uint32_t, uint32_t)>
 inline void CodeGeneratorARM::EmitPcRelativeLinkerPatches(
     const ArenaDeque<PcRelativePatchInfo>& infos,
@@ -7707,28 +7707,18 @@ void InstructionCodeGeneratorARM::VisitClassTableGet(HClassTableGet* instruction
   }
 }
 
-static void PatchJitRootUse(uint8_t* code,
-                            const uint8_t* roots_data,
-                            Literal* literal,
-                            uint64_t index_in_table) {
-  DCHECK(literal->GetLabel()->IsBound());
-  uint32_t literal_offset = literal->GetLabel()->Position();
-  uintptr_t address =
-      reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
-  uint8_t* data = code + literal_offset;
-  reinterpret_cast<uint32_t*>(data)[0] = dchecked_integral_cast<uint32_t>(address);
-}
-
 void CodeGeneratorARM::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
   for (const auto& entry : jit_string_patches_) {
     const auto& it = jit_string_roots_.find(entry.first);
     DCHECK(it != jit_string_roots_.end());
-    PatchJitRootUse(code, roots_data, entry.second, it->second);
-  }
-  for (const auto& entry : jit_class_patches_) {
-    const auto& it = jit_class_roots_.find(entry.first);
-    DCHECK(it != jit_class_roots_.end());
-    PatchJitRootUse(code, roots_data, entry.second, it->second);
+    size_t index_in_table = it->second;
+    Literal* literal = entry.second;
+    DCHECK(literal->GetLabel()->IsBound());
+    uint32_t literal_offset = literal->GetLabel()->Position();
+    uintptr_t address =
+        reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
+    uint8_t* data = code + literal_offset;
+    reinterpret_cast<uint32_t*>(data)[0] = dchecked_integral_cast<uint32_t>(address);
   }
 }
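The 128-byte window in the ARM hunk above comes from the Thumb 16-bit LDR encoding: a 5-bit immediate scaled by the 4-byte access size. A small standalone illustration of the base/offset split; MaxInt is ART's mask-of-low-bits helper, spelled out here:

    #include <cassert>
    #include <cstdint>

    // Split a 4-byte-aligned address into a 128B-aligned base (loaded from a
    // deduplicated literal) and a small offset that fits the LDR immediate.
    void SplitDexCacheAddress(uint32_t address, uint32_t* base, uint32_t* offset) {
      constexpr uint32_t kOffsetBits = 5 + 2;               // 5 encoded bits, scaled by 4
      constexpr uint32_t kMask = (1u << kOffsetBits) - 1u;  // low 7 bits
      assert(address % 4 == 0);
      *base = address & ~kMask;    // 128B-aligned literal value
      *offset = address & kMask;   // embedded in the load instruction
    }
    // Example: address 0x12345678 -> base 0x12345600, offset 0x78.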
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index b9291728ff..8230512825 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -491,9 +491,6 @@ class CodeGeneratorARM : public CodeGenerator {
   Literal* DeduplicateBootImageAddressLiteral(uint32_t address);
   Literal* DeduplicateDexCacheAddressLiteral(uint32_t address);
   Literal* DeduplicateJitStringLiteral(const DexFile& dex_file, dex::StringIndex string_index);
-  Literal* DeduplicateJitClassLiteral(const DexFile& dex_file,
-                                      dex::TypeIndex type_index,
-                                      uint64_t address);
 
   void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
 
@@ -602,9 +599,9 @@ class CodeGeneratorARM : public CodeGenerator {
   using StringToLiteralMap = ArenaSafeMap<StringReference,
                                           Literal*,
                                           StringReferenceValueComparator>;
-  using TypeToLiteralMap = ArenaSafeMap<TypeReference,
-                                        Literal*,
-                                        TypeReferenceValueComparator>;
+  using BootTypeToLiteralMap = ArenaSafeMap<TypeReference,
+                                            Literal*,
+                                            TypeReferenceValueComparator>;
 
   Literal* DeduplicateUint32Literal(uint32_t value, Uint32ToLiteralMap* map);
   Literal* DeduplicateMethodLiteral(MethodReference target_method, MethodToLiteralMap* map);
@@ -641,7 +638,7 @@ class CodeGeneratorARM : public CodeGenerator {
   // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
   ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
   // Deduplication map for boot type literals for kBootImageLinkTimeAddress.
-  TypeToLiteralMap boot_image_type_patches_;
+  BootTypeToLiteralMap boot_image_type_patches_;
   // PC-relative type patch info.
   ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
   // Deduplication map for patchable boot image addresses.
@@ -649,8 +646,6 @@ class CodeGeneratorARM : public CodeGenerator {
 
   // Patches for string literals in JIT compiled code.
   StringToLiteralMap jit_string_patches_;
-  // Patches for class literals in JIT compiled code.
-  TypeToLiteralMap jit_class_patches_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM);
 };
 
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 89e5d9eb68..6eebd69a04 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1162,9 +1162,7 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
       boot_image_address_patches_(std::less<uint32_t>(),
                                   graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       jit_string_patches_(StringReferenceValueComparator(),
-                          graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      jit_class_patches_(TypeReferenceValueComparator(),
-                         graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+                          graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
   // Save the link register (containing the return address) to mimic Quick.
   AddAllocatedRegister(LocationFrom(lr));
 }
 
@@ -4184,14 +4182,6 @@ vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitStringLitera
       [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
 }
 
-vixl::aarch64::Literal<uint32_t>* CodeGeneratorARM64::DeduplicateJitClassLiteral(
-    const DexFile& dex_file, dex::TypeIndex type_index, uint64_t address) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, type_index), address);
-  return jit_class_patches_.GetOrCreate(
-      TypeReference(&dex_file, type_index),
-      [this]() { return __ CreateLiteralDestroyedWithPool<uint32_t>(/* placeholder */ 0u); });
-}
-
 void CodeGeneratorARM64::EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label,
                                              vixl::aarch64::Register reg) {
   DCHECK(reg.IsX());
@@ -4369,7 +4359,7 @@ HLoadClass::LoadKind CodeGeneratorARM64::GetSupportedLoadClassKind(
       break;
     case HLoadClass::LoadKind::kBootImageAddress:
       break;
-    case HLoadClass::LoadKind::kJitTableAddress:
+    case HLoadClass::LoadKind::kDexCacheAddress:
       DCHECK(Runtime::Current()->UseJitCompilation());
       break;
     case HLoadClass::LoadKind::kDexCachePcRelative:
@@ -4462,16 +4452,26 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
       __ Ldr(out.W(), codegen_->DeduplicateBootImageAddressLiteral(cls->GetAddress()));
       break;
     }
-    case HLoadClass::LoadKind::kJitTableAddress: {
-      __ Ldr(out, codegen_->DeduplicateJitClassLiteral(cls->GetDexFile(),
-                                                       cls->GetTypeIndex(),
-                                                       cls->GetAddress()));
+    case HLoadClass::LoadKind::kDexCacheAddress: {
+      DCHECK_NE(cls->GetAddress(), 0u);
+      // LDR immediate has a 12-bit offset multiplied by the size and for 32-bit loads
+      // that gives a 16KiB range. To try and reduce the number of literals if we load
+      // multiple types, simply split the dex cache address to a 16KiB aligned base
+      // loaded from a literal and the remaining offset embedded in the load.
+      static_assert(sizeof(GcRoot<mirror::Class>) == 4u, "Expected GC root to be 4 bytes.");
+      DCHECK_ALIGNED(cls->GetAddress(), 4u);
+      constexpr size_t offset_bits = /* encoded bits */ 12 + /* scale */ 2;
+      uint64_t base_address = cls->GetAddress() & ~MaxInt<uint64_t>(offset_bits);
+      uint32_t offset = cls->GetAddress() & MaxInt<uint64_t>(offset_bits);
+      __ Ldr(out.X(), codegen_->DeduplicateDexCacheAddressLiteral(base_address));
+      // /* GcRoot<mirror::Class> */ out = *(base_address + offset)
       GenerateGcRootFieldLoad(cls,
                               out_loc,
                               out.X(),
-                              /* offset */ 0,
+                              offset,
                               /* fixup_label */ nullptr,
-                              kCompilerReadBarrierOption);
+                              read_barrier_option);
+      generate_null_check = !cls->IsInDexCache();
       break;
     }
     case HLoadClass::LoadKind::kDexCachePcRelative: {
@@ -5782,27 +5782,17 @@ void InstructionCodeGeneratorARM64::VisitClassTableGet(HClassTableGet* instructi
   }
 }
 
-static void PatchJitRootUse(uint8_t* code,
-                            const uint8_t* roots_data,
-                            vixl::aarch64::Literal<uint32_t>* literal,
-                            uint64_t index_in_table) {
-  uint32_t literal_offset = literal->GetOffset();
-  uintptr_t address =
-      reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
-  uint8_t* data = code + literal_offset;
-  reinterpret_cast<uint32_t*>(data)[0] = dchecked_integral_cast<uint32_t>(address);
-}
-
 void CodeGeneratorARM64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
   for (const auto& entry : jit_string_patches_) {
     const auto& it = jit_string_roots_.find(entry.first);
     DCHECK(it != jit_string_roots_.end());
-    PatchJitRootUse(code, roots_data, entry.second, it->second);
-  }
-  for (const auto& entry : jit_class_patches_) {
-    const auto& it = jit_class_roots_.find(entry.first);
-    DCHECK(it != jit_class_roots_.end());
-    PatchJitRootUse(code, roots_data, entry.second, it->second);
+    size_t index_in_table = it->second;
+    vixl::aarch64::Literal<uint32_t>* literal = entry.second;
+    uint32_t literal_offset = literal->GetOffset();
+    uintptr_t address =
+        reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
+    uint8_t* data = code + literal_offset;
+    reinterpret_cast<uint32_t*>(data)[0] = dchecked_integral_cast<uint32_t>(address);
  }
 }
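Because many classes can share one aligned base, DeduplicateDexCacheAddressLiteral pays off by mapping each distinct base address to a single pool literal. A rough sketch of that deduplication pattern; the container and names are simplified stand-ins for the codegen's ArenaSafeMap:

    #include <cstdint>
    #include <map>

    struct Literal { uint64_t value; };

    // One literal per distinct base address: repeated loads of classes whose dex
    // cache slots fall in the same 16KiB window reuse the same pool entry.
    class DexCacheLiteralPool {
     public:
      Literal* Deduplicate(uint64_t base_address) {
        auto it = literals_.find(base_address);
        if (it == literals_.end()) {
          it = literals_.emplace(base_address, Literal{base_address}).first;
        }
        return &it->second;
      }
     private:
      std::map<uint64_t, Literal> literals_;
    };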
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index 50bbc99ca5..868c8b07ed 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -569,9 +569,6 @@ class CodeGeneratorARM64 : public CodeGenerator {
   vixl::aarch64::Literal<uint64_t>* DeduplicateDexCacheAddressLiteral(uint64_t address);
   vixl::aarch64::Literal<uint32_t>* DeduplicateJitStringLiteral(const DexFile& dex_file,
                                                                 dex::StringIndex string_index);
-  vixl::aarch64::Literal<uint32_t>* DeduplicateJitClassLiteral(const DexFile& dex_file,
-                                                               dex::TypeIndex string_index,
-                                                               uint64_t address);
 
   void EmitAdrpPlaceholder(vixl::aarch64::Label* fixup_label, vixl::aarch64::Register reg);
   void EmitAddPlaceholder(vixl::aarch64::Label* fixup_label,
@@ -685,9 +682,9 @@ class CodeGeneratorARM64 : public CodeGenerator {
   using StringToLiteralMap = ArenaSafeMap<StringReference,
                                           vixl::aarch64::Literal<uint32_t>*,
                                           StringReferenceValueComparator>;
-  using TypeToLiteralMap = ArenaSafeMap<TypeReference,
-                                        vixl::aarch64::Literal<uint32_t>*,
-                                        TypeReferenceValueComparator>;
+  using BootTypeToLiteralMap = ArenaSafeMap<TypeReference,
+                                            vixl::aarch64::Literal<uint32_t>*,
+                                            TypeReferenceValueComparator>;
 
   vixl::aarch64::Literal<uint32_t>* DeduplicateUint32Literal(uint32_t value,
                                                              Uint32ToLiteralMap* map);
@@ -752,7 +749,7 @@ class CodeGeneratorARM64 : public CodeGenerator {
   // PC-relative String patch info; type depends on configuration (app .bss or boot image PIC).
   ArenaDeque<PcRelativePatchInfo> pc_relative_string_patches_;
   // Deduplication map for boot type literals for kBootImageLinkTimeAddress.
-  TypeToLiteralMap boot_image_type_patches_;
+  BootTypeToLiteralMap boot_image_type_patches_;
   // PC-relative type patch info.
   ArenaDeque<PcRelativePatchInfo> pc_relative_type_patches_;
   // Deduplication map for patchable boot image addresses.
@@ -760,8 +757,6 @@ class CodeGeneratorARM64 : public CodeGenerator {
 
   // Patches for string literals in JIT compiled code.
   StringToLiteralMap jit_string_patches_;
-  // Patches for class literals in JIT compiled code.
-  TypeToLiteralMap jit_class_patches_;
 
   DISALLOW_COPY_AND_ASSIGN(CodeGeneratorARM64);
 };
 
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 2c6df38daa..3a3d2a9db1 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -5776,7 +5776,7 @@ HLoadClass::LoadKind CodeGeneratorARMVIXL::GetSupportedLoadClassKind(
     case HLoadClass::LoadKind::kBootImageAddress:
       // TODO(VIXL): Enable it back when literal pools are fixed in VIXL.
       return HLoadClass::LoadKind::kDexCacheViaMethod;
-    case HLoadClass::LoadKind::kJitTableAddress:
+    case HLoadClass::LoadKind::kDexCacheAddress:
       // TODO(VIXL): Enable it back when literal pools are fixed in VIXL.
       return HLoadClass::LoadKind::kDexCacheViaMethod;
     case HLoadClass::LoadKind::kDexCachePcRelative:
@@ -5868,7 +5868,7 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) {
       TODO_VIXL32(FATAL);
       break;
     }
-    case HLoadClass::LoadKind::kJitTableAddress: {
+    case HLoadClass::LoadKind::kDexCacheAddress: {
       TODO_VIXL32(FATAL);
       break;
     }
 
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index cae4161daf..ff48f6642d 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -5251,9 +5251,9 @@ HLoadClass::LoadKind CodeGeneratorMIPS::GetSupportedLoadClassKind(
       break;
     case HLoadClass::LoadKind::kBootImageAddress:
       break;
-    case HLoadClass::LoadKind::kJitTableAddress:
+    case HLoadClass::LoadKind::kDexCacheAddress:
       DCHECK(Runtime::Current()->UseJitCompilation());
-      fallback_load = true;
+      fallback_load = false;
       break;
     case HLoadClass::LoadKind::kDexCachePcRelative:
       DCHECK(!Runtime::Current()->UseJitCompilation());
@@ -5614,8 +5614,17 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) {
                      codegen_->DeduplicateBootImageAddressLiteral(address));
       break;
     }
-    case HLoadClass::LoadKind::kJitTableAddress: {
-      LOG(FATAL) << "Unimplemented";
+    case HLoadClass::LoadKind::kDexCacheAddress: {
+      DCHECK_NE(cls->GetAddress(), 0u);
+      uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
+      static_assert(sizeof(GcRoot<mirror::Class>) == 4u, "Expected GC root to be 4 bytes.");
+      DCHECK_ALIGNED(cls->GetAddress(), 4u);
+      int16_t offset = Low16Bits(address);
+      uint32_t base_address = address - offset;  // This accounts for offset sign extension.
+      __ Lui(out, High16Bits(base_address));
+      // /* GcRoot<mirror::Class> */ out = *(base_address + offset)
+      GenerateGcRootFieldLoad(cls, out_loc, out, offset);
+      generate_null_check = !cls->IsInDexCache();
       break;
     }
     case HLoadClass::LoadKind::kDexCachePcRelative: {
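The MIPS variant is subtler: the 16-bit load offset is sign-extended, so when the low half of the address looks negative the base must be biased up by 0x10000. Computing the base as address - (int16_t)low16 handles that automatically; a worked check under those assumptions:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t address = 0x1234ABCCu;                            // low half >= 0x8000
      int16_t offset = static_cast<int16_t>(address & 0xFFFFu);  // -0x5434 after sign extension
      uint32_t base = address - offset;                          // 0x12350000, not 0x12340000
      // lui reg, High16Bits(base); lw reg, offset(reg) reloads the exact address:
      assert(base + static_cast<int32_t>(offset) == address);
      return 0;
    }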
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 8612a67c8b..d6e92ccb81 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -1013,7 +1013,6 @@ CodeGeneratorX86::CodeGeneratorX86(HGraph* graph,
       string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       constant_area_start_(-1),
       fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       method_address_offset_(-1) {
@@ -6035,7 +6034,7 @@ HLoadClass::LoadKind CodeGeneratorX86::GetSupportedLoadClassKind(
       break;
     case HLoadClass::LoadKind::kBootImageAddress:
       break;
-    case HLoadClass::LoadKind::kJitTableAddress:
+    case HLoadClass::LoadKind::kDexCacheAddress:
       DCHECK(Runtime::Current()->UseJitCompilation());
       break;
     case HLoadClass::LoadKind::kDexCacheViaMethod:
@@ -6074,16 +6073,6 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
   locations->SetOut(Location::RequiresRegister());
 }
 
-Label* CodeGeneratorX86::NewJitRootClassPatch(const DexFile& dex_file,
-                                              dex::TypeIndex dex_index,
-                                              uint64_t address) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index), address);
-  // Add a patch entry and return the label.
-  jit_class_patches_.emplace_back(dex_file, dex_index.index_);
-  PatchInfo<Label>* info = &jit_class_patches_.back();
-  return &info->label;
-}
-
 void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
   LocationSummary* locations = cls->GetLocations();
   if (cls->NeedsAccessCheck()) {
@@ -6135,12 +6124,16 @@ void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
       codegen_->RecordSimplePatch();
       break;
     }
-    case HLoadClass::LoadKind::kJitTableAddress: {
-      Address address = Address::Absolute(CodeGeneratorX86::kDummy32BitOffset);
-      Label* fixup_label = codegen_->NewJitRootClassPatch(
-          cls->GetDexFile(), cls->GetTypeIndex(), cls->GetAddress());
+    case HLoadClass::LoadKind::kDexCacheAddress: {
+      DCHECK_NE(cls->GetAddress(), 0u);
+      uint32_t address = dchecked_integral_cast<uint32_t>(cls->GetAddress());
       // /* GcRoot<mirror::Class> */ out = *address
-      GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
+      GenerateGcRootFieldLoad(cls,
+                              out_loc,
+                              Address::Absolute(address),
+                              /* fixup_label */ nullptr,
+                              read_barrier_option);
+      generate_null_check = !cls->IsInDexCache();
       break;
     }
     case HLoadClass::LoadKind::kDexCachePcRelative: {
@@ -7777,31 +7770,18 @@ void CodeGeneratorX86::MoveFromReturnRegister(Location target, Primitive::Type t
   }
 }
 
-void CodeGeneratorX86::PatchJitRootUse(uint8_t* code,
-                                       const uint8_t* roots_data,
-                                       const PatchInfo<Label>& info,
-                                       uint64_t index_in_table) const {
-  uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
-  uintptr_t address =
-      reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
-  typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
-  reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
-      dchecked_integral_cast<uint32_t>(address);
-}
-
 void CodeGeneratorX86::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
   for (const PatchInfo<Label>& info : jit_string_patches_) {
-    const auto& it = jit_string_roots_.find(
-        StringReference(&info.dex_file, dex::StringIndex(info.index)));
+    const auto& it = jit_string_roots_.find(StringReference(&info.dex_file,
+                                                            dex::StringIndex(info.index)));
     DCHECK(it != jit_string_roots_.end());
-    PatchJitRootUse(code, roots_data, info, it->second);
-  }
-
-  for (const PatchInfo<Label>& info : jit_class_patches_) {
-    const auto& it = jit_class_roots_.find(
-        TypeReference(&info.dex_file, dex::TypeIndex(info.index)));
-    DCHECK(it != jit_class_roots_.end());
-    PatchJitRootUse(code, roots_data, info, it->second);
+    size_t index_in_table = it->second;
+    uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
+    uintptr_t address =
+        reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
+    typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+    reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
+        dchecked_integral_cast<uint32_t>(address);
   }
 }
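Across all the backends the kDexCacheAddress path has the same semantics: read the GcRoot slot whose absolute address sharpening baked into the HLoadClass, and fall back to the runtime only when the slot may still be empty. A pseudo-C++ rendering of that shape, with stand-in types since the real code is emitted assembly:

    #include <atomic>

    struct Class {};                                   // stand-in for mirror::Class
    Class* ResolveTypeRuntimeCall() { return nullptr; }  // stand-in for the slow path

    Class* LoadClassViaDexCache(std::atomic<Class*>* slot, bool is_in_dex_cache) {
      Class* cls = slot->load(std::memory_order_relaxed);  // the GcRoot<Class> load
      if (!is_in_dex_cache && cls == nullptr) {            // generate_null_check case
        cls = ResolveTypeRuntimeCall();                    // resolve and fill the slot
      }
      return cls;
    }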
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index c44da97a90..2ae3670bed 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -416,17 +416,12 @@ class CodeGeneratorX86 : public CodeGenerator {
   Label* NewStringBssEntryPatch(HLoadString* load_string);
   Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
   Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index);
-  Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
 
   void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
 
   // Emit linker patches.
   void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
 
-  void PatchJitRootUse(uint8_t* code,
-                       const uint8_t* roots_data,
-                       const PatchInfo<Label>& info,
-                       uint64_t index_in_table) const;
   void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
 
   // Emit a write barrier.
@@ -628,9 +623,6 @@ class CodeGeneratorX86 : public CodeGenerator {
   // Patches for string root accesses in JIT compiled code.
   ArenaDeque<PatchInfo<Label>> jit_string_patches_;
 
-  // Patches for class root accesses in JIT compiled code.
-  ArenaDeque<PatchInfo<Label>> jit_class_patches_;
-
   // Offset to the start of the constant area in the assembled code.
   // Used for fixups to the constant area.
   int32_t constant_area_start_;
 
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 7dfc736d9c..4474decf59 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1260,8 +1260,7 @@ CodeGeneratorX86_64::CodeGeneratorX86_64(HGraph* graph,
       string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       type_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       fixups_to_jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
-      jit_class_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
+      jit_string_patches_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)) {
   AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
 }
 
@@ -5461,7 +5460,8 @@ HLoadClass::LoadKind CodeGeneratorX86_64::GetSupportedLoadClassKind(
       break;
     case HLoadClass::LoadKind::kBootImageAddress:
       break;
-    case HLoadClass::LoadKind::kJitTableAddress:
+    case HLoadClass::LoadKind::kDexCacheAddress:
+      DCHECK(Runtime::Current()->UseJitCompilation());
       break;
     case HLoadClass::LoadKind::kDexCachePcRelative:
       DCHECK(!Runtime::Current()->UseJitCompilation());
@@ -5500,16 +5500,6 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
   locations->SetOut(Location::RequiresRegister());
 }
 
-Label* CodeGeneratorX86_64::NewJitRootClassPatch(const DexFile& dex_file,
-                                                 dex::TypeIndex dex_index,
-                                                 uint64_t address) {
-  jit_class_roots_.Overwrite(TypeReference(&dex_file, dex_index), address);
-  // Add a patch entry and return the label.
-  jit_class_patches_.emplace_back(dex_file, dex_index.index_);
-  PatchInfo<Label>* info = &jit_class_patches_.back();
-  return &info->label;
-}
-
 void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
   LocationSummary* locations = cls->GetLocations();
   if (cls->NeedsAccessCheck()) {
@@ -5553,13 +5543,26 @@ void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
       codegen_->RecordSimplePatch();
       break;
     }
-    case HLoadClass::LoadKind::kJitTableAddress: {
-      Address address = Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset,
-                                          /* no_rip */ true);
-      Label* fixup_label =
-          codegen_->NewJitRootClassPatch(cls->GetDexFile(), cls->GetTypeIndex(), cls->GetAddress());
+    case HLoadClass::LoadKind::kDexCacheAddress: {
+      DCHECK_NE(cls->GetAddress(), 0u);
       // /* GcRoot<mirror::Class> */ out = *address
-      GenerateGcRootFieldLoad(cls, out_loc, address, fixup_label, kCompilerReadBarrierOption);
+      if (IsUint<32>(cls->GetAddress())) {
+        Address address = Address::Absolute(cls->GetAddress(), /* no_rip */ true);
+        GenerateGcRootFieldLoad(cls,
+                                out_loc,
+                                address,
+                                /* fixup_label */ nullptr,
+                                read_barrier_option);
+      } else {
+        // TODO: Consider using opcode A1, i.e. movl eax, moff32 (with 64-bit address).
+        __ movq(out, Immediate(cls->GetAddress()));
+        GenerateGcRootFieldLoad(cls,
+                                out_loc,
+                                Address(out, 0),
+                                /* fixup_label */ nullptr,
+                                read_barrier_option);
+      }
+      generate_null_check = !cls->IsInDexCache();
       break;
     }
     case HLoadClass::LoadKind::kDexCachePcRelative: {
@@ -7124,31 +7127,18 @@ void CodeGeneratorX86_64::MoveInt64ToAddress(const Address& addr_low,
   }
 }
 
-void CodeGeneratorX86_64::PatchJitRootUse(uint8_t* code,
-                                          const uint8_t* roots_data,
-                                          const PatchInfo<Label>& info,
-                                          uint64_t index_in_table) const {
-  uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
-  uintptr_t address =
-      reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
-  typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
-  reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
-      dchecked_integral_cast<uint32_t>(address);
-}
-
 void CodeGeneratorX86_64::EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) {
   for (const PatchInfo<Label>& info : jit_string_patches_) {
-    const auto& it = jit_string_roots_.find(
-        StringReference(&info.dex_file, dex::StringIndex(info.index)));
+    const auto& it = jit_string_roots_.find(StringReference(&info.dex_file,
+                                                            dex::StringIndex(info.index)));
     DCHECK(it != jit_string_roots_.end());
-    PatchJitRootUse(code, roots_data, info, it->second);
-  }
-
-  for (const PatchInfo<Label>& info : jit_class_patches_) {
-    const auto& it = jit_class_roots_.find(
-        TypeReference(&info.dex_file, dex::TypeIndex(info.index)));
-    DCHECK(it != jit_class_roots_.end());
-    PatchJitRootUse(code, roots_data, info, it->second);
+    size_t index_in_table = it->second;
+    uint32_t code_offset = info.label.Position() - kLabelPositionToLiteralOffsetAdjustment;
+    uintptr_t address =
+        reinterpret_cast<uintptr_t>(roots_data) + index_in_table * sizeof(GcRoot<mirror::Object>);
+    typedef __attribute__((__aligned__(1))) uint32_t unaligned_uint32_t;
+    reinterpret_cast<unaligned_uint32_t*>(code + code_offset)[0] =
+        dchecked_integral_cast<uint32_t>(address);
  }
 }
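The x86-64 hunk above needs two code shapes because the dex cache slot may live above 4GiB: a 32-bit address can be encoded directly as an absolute memory operand, while a larger one must first be materialized into a register. A sketch of that decision; the enum and names are illustrative only:

    #include <cstdint>

    enum class LoadShape {
      kAbsoluteDisp32,         // movl reg, [disp32]
      kMaterializeThenDeref    // movq reg, imm64; movl reg, [reg]
    };

    LoadShape ChooseLoadShape(uint64_t slot_address) {
      // Mirrors IsUint<32>(cls->GetAddress()) in the diff above.
      return slot_address <= UINT32_MAX ? LoadShape::kAbsoluteDisp32
                                        : LoadShape::kMaterializeThenDeref;
    }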
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 391a23b7ce..2f41f73da6 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -413,17 +413,11 @@ class CodeGeneratorX86_64 : public CodeGenerator {
   Label* NewStringBssEntryPatch(HLoadString* load_string);
   Label* NewPcRelativeDexCacheArrayPatch(const DexFile& dex_file, uint32_t element_offset);
   Label* NewJitRootStringPatch(const DexFile& dex_file, dex::StringIndex dex_index);
-  Label* NewJitRootClassPatch(const DexFile& dex_file, dex::TypeIndex dex_index, uint64_t address);
 
   void MoveFromReturnRegister(Location trg, Primitive::Type type) OVERRIDE;
 
   void EmitLinkerPatches(ArenaVector<LinkerPatch>* linker_patches) OVERRIDE;
 
-  void PatchJitRootUse(uint8_t* code,
-                       const uint8_t* roots_data,
-                       const PatchInfo<Label>& info,
-                       uint64_t index_in_table) const;
-
   void EmitJitRootPatches(uint8_t* code, const uint8_t* roots_data) OVERRIDE;
 
   const X86_64InstructionSetFeatures& GetInstructionSetFeatures() const {
@@ -614,9 +608,6 @@ class CodeGeneratorX86_64 : public CodeGenerator {
   // Patches for string literals in JIT compiled code.
   ArenaDeque<PatchInfo<Label>> jit_string_patches_;
 
-  // Patches for class literals in JIT compiled code.
-  ArenaDeque<PatchInfo<Label>> jit_class_patches_;
-
   DISALLOW_COPY_AND_ASSIGN(CodeGeneratorX86_64);
 };
 
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index fe4662abb1..8d93867230 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -1444,7 +1444,7 @@ size_t HInliner::RunOptimizations(HGraph* callee_graph,
   // optimization that could lead to a HDeoptimize. The following optimizations do not.
   HDeadCodeElimination dce(callee_graph, stats_, "dead_code_elimination$inliner");
   HConstantFolding fold(callee_graph, "constant_folding$inliner");
-  HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_, handles_);
+  HSharpening sharpening(callee_graph, codegen_, dex_compilation_unit, compiler_driver_);
   InstructionSimplifier simplify(callee_graph, stats_);
   IntrinsicsRecognizer intrinsics(callee_graph, stats_);
 
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 925d4f1fd1..594255c625 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2487,8 +2487,8 @@ std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs) {
       return os << "BootImageLinkTimePcRelative";
     case HLoadClass::LoadKind::kBootImageAddress:
       return os << "BootImageAddress";
-    case HLoadClass::LoadKind::kJitTableAddress:
-      return os << "JitTableAddress";
+    case HLoadClass::LoadKind::kDexCacheAddress:
+      return os << "DexCacheAddress";
     case HLoadClass::LoadKind::kDexCachePcRelative:
       return os << "DexCachePcRelative";
     case HLoadClass::LoadKind::kDexCacheViaMethod:
 
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 659cddae05..e3f4d8f035 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -5493,8 +5493,9 @@ class HLoadClass FINAL : public HInstruction {
     // GetIncludePatchInformation().
     kBootImageAddress,
 
-    // Load from the root table associated with the JIT compiled method.
-    kJitTableAddress,
+    // Load from the resolved types array at an absolute address.
+    // Used for classes outside the boot image referenced by JIT-compiled code.
+    kDexCacheAddress,
 
     // Load from resolved types array in the dex cache using a PC-relative load.
     // Used for classes outside boot image when we know that we can access
@@ -5587,6 +5588,7 @@ class HLoadClass FINAL : public HInstruction {
            NeedsAccessCheck();
   }
 
+
   bool CanThrow() const OVERRIDE {
     return CanCallRuntime();
   }
@@ -5611,9 +5613,7 @@ class HLoadClass FINAL : public HInstruction {
     return load_data_.address;
   }
 
-  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE {
-    return !IsReferrersClass();
-  }
+  bool NeedsDexCacheOfDeclaringClass() const OVERRIDE { return !IsReferrersClass(); }
 
   static SideEffects SideEffectsForArchRuntimeCalls() {
     return SideEffects::CanTriggerGC();
@@ -5672,8 +5672,7 @@ class HLoadClass FINAL : public HInstruction {
   }
 
   static bool HasAddress(LoadKind load_kind) {
-    return load_kind == LoadKind::kBootImageAddress ||
-           load_kind == LoadKind::kJitTableAddress;
+    return load_kind == LoadKind::kBootImageAddress || load_kind == LoadKind::kDexCacheAddress;
   }
 
   static bool HasDexCacheReference(LoadKind load_kind) {
@@ -5692,7 +5691,7 @@ class HLoadClass FINAL : public HInstruction {
 
   union {
     uint32_t dex_cache_element_index;  // Only for dex cache reference.
-    uint64_t address;  // Up to 64-bit, needed for kJitTableAddress on 64-bit targets.
+    uint64_t address;  // Up to 64-bit, needed for kDexCacheAddress on 64-bit targets.
   } load_data_;
 
   ReferenceTypeInfo loaded_class_rti_;
 
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 64c87dc13a..8ea2b06530 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -375,8 +375,7 @@ class OptimizingCompiler FINAL : public Compiler {
                             const DexFile& dex_file,
                             Handle<mirror::DexCache> dex_cache,
                             ArtMethod* method,
-                            bool osr,
-                            VariableSizedHandleScope* handles) const;
+                            bool osr) const;
 
   void MaybeRunInliner(HGraph* graph,
                        CodeGenerator* codegen,
@@ -496,7 +495,7 @@ static HOptimization* BuildOptimization(
                                          number_of_dex_registers,
                                          /* depth */ 0);
   } else if (opt_name == HSharpening::kSharpeningPassName) {
-    return new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver, handles);
+    return new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
   } else if (opt_name == HSelectGenerator::kSelectGeneratorPassName) {
     return new (arena) HSelectGenerator(graph, stats);
   } else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
@@ -768,8 +767,7 @@ void OptimizingCompiler::RunOptimizations(HGraph* graph,
   HInductionVarAnalysis* induction = new (arena) HInductionVarAnalysis(graph);
   BoundsCheckElimination* bce = new (arena) BoundsCheckElimination(graph, *side_effects, induction);
   HLoopOptimization* loop = new (arena) HLoopOptimization(graph, induction);
-  HSharpening* sharpening = new (arena) HSharpening(
-      graph, codegen, dex_compilation_unit, driver, handles);
+  HSharpening* sharpening = new (arena) HSharpening(graph, codegen, dex_compilation_unit, driver);
   InstructionSimplifier* simplify2 = new (arena) InstructionSimplifier(
       graph, stats, "instruction_simplifier$after_inlining");
   InstructionSimplifier* simplify3 = new (arena) InstructionSimplifier(
@@ -868,8 +866,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
                                               const DexFile& dex_file,
                                               Handle<mirror::DexCache> dex_cache,
                                               ArtMethod* method,
-                                              bool osr,
-                                              VariableSizedHandleScope* handles) const {
+                                              bool osr) const {
   MaybeRecordStat(MethodCompilationStat::kAttemptCompilation);
   CompilerDriver* compiler_driver = GetCompilerDriver();
   InstructionSet instruction_set = compiler_driver->GetInstructionSet();
@@ -979,55 +976,63 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
                                 compiler_driver,
                                 dump_mutex_);
 
+  VLOG(compiler) << "Building " << pass_observer.GetMethodName();
+
   {
-    VLOG(compiler) << "Building " << pass_observer.GetMethodName();
-    PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
-    HGraphBuilder builder(graph,
-                          &dex_compilation_unit,
-                          &dex_compilation_unit,
-                          &dex_file,
-                          *code_item,
-                          compiler_driver,
-                          compilation_stats_.get(),
-                          interpreter_metadata,
-                          dex_cache,
-                          handles);
-    GraphAnalysisResult result = builder.BuildGraph();
-    if (result != kAnalysisSuccess) {
-      switch (result) {
-        case kAnalysisSkipped:
-          MaybeRecordStat(MethodCompilationStat::kNotCompiledSkipped);
-          break;
-        case kAnalysisInvalidBytecode:
-          MaybeRecordStat(MethodCompilationStat::kNotCompiledInvalidBytecode);
-          break;
-        case kAnalysisFailThrowCatchLoop:
-          MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
-          break;
-        case kAnalysisFailAmbiguousArrayOp:
-          MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
-          break;
-        case kAnalysisSuccess:
-          UNREACHABLE();
+    ScopedObjectAccess soa(Thread::Current());
+    VariableSizedHandleScope handles(soa.Self());
+    // Do not hold `mutator_lock_` between optimizations.
+    ScopedThreadSuspension sts(soa.Self(), kNative);
+
+    {
+      PassScope scope(HGraphBuilder::kBuilderPassName, &pass_observer);
+      HGraphBuilder builder(graph,
+                            &dex_compilation_unit,
+                            &dex_compilation_unit,
+                            &dex_file,
+                            *code_item,
+                            compiler_driver,
+                            compilation_stats_.get(),
+                            interpreter_metadata,
+                            dex_cache,
+                            &handles);
+      GraphAnalysisResult result = builder.BuildGraph();
+      if (result != kAnalysisSuccess) {
+        switch (result) {
+          case kAnalysisSkipped:
+            MaybeRecordStat(MethodCompilationStat::kNotCompiledSkipped);
+            break;
+          case kAnalysisInvalidBytecode:
+            MaybeRecordStat(MethodCompilationStat::kNotCompiledInvalidBytecode);
+            break;
+          case kAnalysisFailThrowCatchLoop:
+            MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
+            break;
+          case kAnalysisFailAmbiguousArrayOp:
+            MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
+            break;
+          case kAnalysisSuccess:
+            UNREACHABLE();
+        }
+        pass_observer.SetGraphInBadState();
+        return nullptr;
+      }
     }
-      pass_observer.SetGraphInBadState();
-      return nullptr;
     }
-  }
 
-  RunOptimizations(graph,
-                   codegen.get(),
-                   compiler_driver,
-                   dex_compilation_unit,
-                   &pass_observer,
-                   handles);
+    RunOptimizations(graph,
+                     codegen.get(),
+                     compiler_driver,
+                     dex_compilation_unit,
+                     &pass_observer,
+                     &handles);
 
-  RegisterAllocator::Strategy regalloc_strategy =
-      compiler_options.GetRegisterAllocationStrategy();
-  AllocateRegisters(graph, codegen.get(), &pass_observer, regalloc_strategy);
+    RegisterAllocator::Strategy regalloc_strategy =
+        compiler_options.GetRegisterAllocationStrategy();
+    AllocateRegisters(graph, codegen.get(), &pass_observer, regalloc_strategy);
 
-  codegen->Compile(code_allocator);
-  pass_observer.DumpDisassembly();
+    codegen->Compile(code_allocator);
+    pass_observer.DumpDisassembly();
+  }
 
   return codegen.release();
 }
@@ -1050,27 +1055,19 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
                                       verified_method->GetEncounteredVerificationFailures())) {
     ArenaAllocator arena(Runtime::Current()->GetArenaPool());
     CodeVectorAllocator code_allocator(&arena);
-    std::unique_ptr<CodeGenerator> codegen;
-    {
-      ScopedObjectAccess soa(Thread::Current());
-      VariableSizedHandleScope handles(soa.Self());
-      // Go to native so that we don't block GC during compilation.
-      ScopedThreadSuspension sts(soa.Self(), kNative);
-      codegen.reset(
-          TryCompile(&arena,
-                     &code_allocator,
-                     code_item,
-                     access_flags,
-                     invoke_type,
-                     class_def_idx,
-                     method_idx,
-                     jclass_loader,
-                     dex_file,
-                     dex_cache,
-                     nullptr,
-                     /* osr */ false,
-                     &handles));
-    }
+    std::unique_ptr<CodeGenerator> codegen(
+        TryCompile(&arena,
+                   &code_allocator,
+                   code_item,
+                   access_flags,
+                   invoke_type,
+                   class_def_idx,
+                   method_idx,
+                   jclass_loader,
+                   dex_file,
+                   dex_cache,
+                   nullptr,
+                   /* osr */ false));
     if (codegen.get() != nullptr) {
       MaybeRecordStat(MethodCompilationStat::kCompiled);
       method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver, code_item);
@@ -1141,8 +1138,6 @@ bool OptimizingCompiler::JitCompile(Thread* self,
   ArenaAllocator arena(Runtime::Current()->GetJitArenaPool());
   CodeVectorAllocator code_allocator(&arena);
 
-  VariableSizedHandleScope handles(self);
-
   std::unique_ptr<CodeGenerator> codegen;
   {
     // Go to native so that we don't block GC during compilation.
@@ -1159,8 +1154,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
                    *dex_file,
                    dex_cache,
                    method,
-                   osr,
-                   &handles));
+                   osr));
     if (codegen.get() == nullptr) {
       return false;
    }
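TryCompile now owns the whole handle-scope dance that its callers used to set up: acquire the mutator lock to create the scope, then drop to native for the long-running passes. Reduced to its skeleton, with stub structs standing in for ART's scoped helpers since only the construction order matters here:

    // Stand-ins for ScopedObjectAccess, VariableSizedHandleScope and
    // ScopedThreadSuspension; the bodies of the real classes do the lock work.
    struct ScopedObjectAccess { ScopedObjectAccess() { /* to runnable, mutator lock */ } };
    struct VariableSizedHandleScope { explicit VariableSizedHandleScope(ScopedObjectAccess*) {} };
    struct ScopedThreadSuspension { ScopedThreadSuspension() { /* to native, GC may run */ } };

    void TryCompileSkeleton() {
      ScopedObjectAccess soa;                  // must be runnable to create handles
      VariableSizedHandleScope handles(&soa);  // roots live for the whole compilation
      ScopedThreadSuspension sts;              // long passes run without blocking GC
      // ... BuildGraph / RunOptimizations / AllocateRegisters / Compile ...
    }                                          // scopes unwind in reverse order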
diff --git a/compiler/optimizing/sharpening.cc b/compiler/optimizing/sharpening.cc
index 93123a26db..daf160a483 100644
--- a/compiler/optimizing/sharpening.cc
+++ b/compiler/optimizing/sharpening.cc
@@ -190,12 +190,15 @@ void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
       // TODO: Use direct pointers for all non-moving spaces, not just boot image. Bug: 29530787
       desired_load_kind = HLoadClass::LoadKind::kBootImageAddress;
       address = reinterpret_cast64<uint64_t>(klass);
-    } else if (is_in_dex_cache) {
-      desired_load_kind = HLoadClass::LoadKind::kJitTableAddress;
-      // We store in the address field the location of the stack reference maintained
-      // by the handle. We do this now so that the code generation does not need to figure
-      // out which class loader to use.
-      address = reinterpret_cast<uint64_t>(handles_->NewHandle(klass).GetReference());
+    } else {
+      // Note: If the class is not in the dex cache or isn't initialized, the
+      // instruction needs environment and will not be inlined across dex files.
+      // Within a dex file, the slow-path helper loads the correct class and
+      // inlined frames are used correctly for OOM stack trace.
+      // TODO: Write a test for this. Bug: 29416588
+      desired_load_kind = HLoadClass::LoadKind::kDexCacheAddress;
+      void* dex_cache_element_address = &dex_cache->GetResolvedTypes()[type_index.index_];
+      address = reinterpret_cast64<uint64_t>(dex_cache_element_address);
     }
     // AOT app compilation. Check if the class is in the boot image.
   } else if (is_in_boot_image && !codegen_->GetCompilerOptions().GetCompilePic()) {
@@ -242,7 +245,7 @@ void HSharpening::ProcessLoadClass(HLoadClass* load_class) {
       load_class->SetLoadKindWithTypeReference(load_kind, dex_file, type_index);
       break;
     case HLoadClass::LoadKind::kBootImageAddress:
-    case HLoadClass::LoadKind::kJitTableAddress:
+    case HLoadClass::LoadKind::kDexCacheAddress:
       DCHECK_NE(address, 0u);
       load_class->SetLoadKindWithAddress(load_kind, address);
       break;
 
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index 74189549fd..d35ae66e05 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -35,13 +35,11 @@ class HSharpening : public HOptimization {
   HSharpening(HGraph* graph,
               CodeGenerator* codegen,
               const DexCompilationUnit& compilation_unit,
-              CompilerDriver* compiler_driver,
-              VariableSizedHandleScope* handles)
+              CompilerDriver* compiler_driver)
       : HOptimization(graph, kSharpeningPassName),
         codegen_(codegen),
         compilation_unit_(compilation_unit),
-        compiler_driver_(compiler_driver),
-        handles_(handles) { }
+        compiler_driver_(compiler_driver) { }
 
   void Run() OVERRIDE;
 
@@ -55,7 +53,6 @@ class HSharpening : public HOptimization {
   CodeGenerator* codegen_;
   const DexCompilationUnit& compilation_unit_;
   CompilerDriver* compiler_driver_;
-  VariableSizedHandleScope* handles_;
 };
 
 }  // namespace art
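The key move in the sharpening change is that the compiler now bakes in the address of the resolved-types slot itself rather than a Handle to the Class object, which is why the handle scope parameter can disappear. A sketch of that slot-address computation, with simplified stand-in types:

    #include <cstdint>

    struct TypeSlot { uint32_t compressed_reference; };  // stand-in for GcRoot<mirror::Class>

    // Stand-in for &dex_cache->GetResolvedTypes()[type_index]: the array itself
    // is assumed not to move for the lifetime of the dex cache, so &types[index]
    // is a stable address the JIT-compiled code can dereference directly.
    uint64_t ResolvedTypeSlotAddress(TypeSlot* resolved_types, uint32_t type_index) {
      return reinterpret_cast<uint64_t>(&resolved_types[type_index]);
    }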
diff --git a/runtime/jit/jit_code_cache.cc b/runtime/jit/jit_code_cache.cc
index 1b0ad8341b..93f50ad2b1 100644
--- a/runtime/jit/jit_code_cache.cc
+++ b/runtime/jit/jit_code_cache.cc
@@ -297,11 +297,10 @@ static void FillRootTable(uint8_t* roots_data, Handle<mirror::ObjectArray<mirror
     ObjPtr<mirror::Object> object = roots->Get(i);
     if (kIsDebugBuild) {
       // Ensure the string is strongly interned. b/32995596
-      if (object->IsString()) {
-        ObjPtr<mirror::String> str = reinterpret_cast<mirror::String*>(object.Ptr());
-        ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-        CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
-      }
+      CHECK(object->IsString());
+      ObjPtr<mirror::String> str = reinterpret_cast<mirror::String*>(object.Ptr());
+      ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+      CHECK(class_linker->GetInternTable()->LookupStrong(Thread::Current(), str) != nullptr);
     }
     gc_roots[i] = GcRoot<mirror::Object>(object);
   }
@@ -317,31 +316,6 @@ static uint8_t* GetRootTable(const void* code_ptr, uint32_t* number_of_roots = n
   return data - ComputeRootTableSize(roots);
 }
 
-// Helper for the GC to process a weak class in a JIT root table.
-static inline void ProcessWeakClass(GcRoot<mirror::Class>* root_ptr, IsMarkedVisitor* visitor)
-    REQUIRES_SHARED(Locks::mutator_lock_) {
-  // This does not need a read barrier because this is called by GC.
-  mirror::Class* cls = root_ptr->Read<kWithoutReadBarrier>();
-  if (cls != nullptr) {
-    DCHECK((cls->IsClass<kDefaultVerifyFlags, kWithoutReadBarrier>()));
-    // Look at the classloader of the class to know if it has been unloaded.
-    // This does not need a read barrier because this is called by GC.
-    mirror::Object* class_loader =
-        cls->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>();
-    if (class_loader == nullptr || visitor->IsMarked(class_loader) != nullptr) {
-      // The class loader is live, update the entry if the class has moved.
-      mirror::Class* new_cls = down_cast<mirror::Class*>(visitor->IsMarked(cls));
-      // Note that new_object can be null for CMS and newly allocated objects.
-      if (new_cls != nullptr && new_cls != cls) {
-        *root_ptr = GcRoot<mirror::Class>(new_cls);
-      }
-    } else {
-      // The class loader is not live, clear the entry.
-      *root_ptr = GcRoot<mirror::Class>(nullptr);
-    }
-  }
-}
-
 void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
   MutexLock mu(Thread::Current(), lock_);
   for (const auto& entry : method_code_map_) {
@@ -351,22 +325,17 @@ void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
     for (uint32_t i = 0; i < number_of_roots; ++i) {
       // This does not need a read barrier because this is called by GC.
       mirror::Object* object = roots[i].Read<kWithoutReadBarrier>();
-      if (object == nullptr) {
-        // entry got deleted in a previous sweep.
-      } else if (object->IsString<kDefaultVerifyFlags, kWithoutReadBarrier>()) {
-        mirror::Object* new_object = visitor->IsMarked(object);
-        // We know the string is marked because it's a strongly-interned string that
-        // is always alive. The IsMarked implementation of the CMS collector returns
-        // null for newly allocated objects, but we know those haven't moved. Therefore,
-        // only update the entry if we get a different non-null string.
-        // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
-        // out of the weak access/creation pause. b/32167580
-        if (new_object != nullptr && new_object != object) {
-          DCHECK(new_object->IsString());
-          roots[i] = GcRoot<mirror::Object>(new_object);
-        }
-      } else {
-        ProcessWeakClass(reinterpret_cast<GcRoot<mirror::Class>*>(&roots[i]), visitor);
+      DCHECK(object != nullptr);
+      mirror::Object* new_object = visitor->IsMarked(object);
+      // We know the string is marked because it's a strongly-interned string that
+      // is always alive. The IsMarked implementation of the CMS collector returns
+      // null for newly allocated objects, but we know those haven't moved. Therefore,
+      // only update the entry if we get a different non-null string.
+      // TODO: Do not use IsMarked for j.l.Class, and adjust once we move this method
+      // out of the weak access/creation pause. b/32167580
+      if (new_object != nullptr && new_object != object) {
+        DCHECK(new_object->IsString());
+        roots[i] = GcRoot<mirror::Object>(new_object);
       }
     }
   }
@@ -375,7 +344,26 @@ void JitCodeCache::SweepRootTables(IsMarkedVisitor* visitor) {
     for (size_t i = 0; i < info->number_of_inline_caches_; ++i) {
       InlineCache* cache = &info->cache_[i];
       for (size_t j = 0; j < InlineCache::kIndividualCacheSize; ++j) {
-        ProcessWeakClass(&cache->classes_[j], visitor);
+        // This does not need a read barrier because this is called by GC.
+        mirror::Class* cls = cache->classes_[j].Read<kWithoutReadBarrier>();
+        if (cls != nullptr) {
+          // Look at the classloader of the class to know if it has been
+          // unloaded.
+          // This does not need a read barrier because this is called by GC.
+          mirror::Object* class_loader =
+              cls->GetClassLoader<kDefaultVerifyFlags, kWithoutReadBarrier>();
+          if (class_loader == nullptr || visitor->IsMarked(class_loader) != nullptr) {
+            // The class loader is live, update the entry if the class has moved.
+            mirror::Class* new_cls = down_cast<mirror::Class*>(visitor->IsMarked(cls));
+            // Note that new_object can be null for CMS and newly allocated objects.
+            if (new_cls != nullptr && new_cls != cls) {
+              cache->classes_[j] = GcRoot<mirror::Class>(new_cls);
+            }
+          } else {
+            // The class loader is not live, clear the entry.
+            cache->classes_[j] = GcRoot<mirror::Class>(nullptr);
+          }
+        }
       }
     }
   }