diff options
Diffstat (limited to 'compiler')
| -rw-r--r-- | compiler/driver/compiler_driver.cc | 30 | ||||
| -rw-r--r-- | compiler/driver/compiler_driver.h | 4 | ||||
| -rw-r--r-- | compiler/driver/compiler_options.h | 19 | ||||
| -rw-r--r-- | compiler/optimizing/inliner.cc | 25 | ||||
| -rw-r--r-- | compiler/optimizing/inliner.h | 3 | ||||
| -rw-r--r-- | compiler/optimizing/stack_map_stream.h | 45 | ||||
| -rw-r--r-- | compiler/optimizing/stack_map_test.cc | 71 |
7 files changed, 150 insertions, 47 deletions
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc index ff4e0d850a..34963a9675 100644 --- a/compiler/driver/compiler_driver.cc +++ b/compiler/driver/compiler_driver.cc @@ -491,11 +491,12 @@ void CompilerDriver::CompileAll(jobject class_loader, } } -static DexToDexCompilationLevel GetDexToDexCompilationlevel( +DexToDexCompilationLevel CompilerDriver::GetDexToDexCompilationlevel( Thread* self, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file, - const DexFile::ClassDef& class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + const DexFile::ClassDef& class_def) { auto* const runtime = Runtime::Current(); - if (runtime->UseJit()) { + if (runtime->UseJit() || GetCompilerOptions().VerifyAtRuntime()) { + // Verify at runtime shouldn't dex to dex since we didn't resolve or verify. return kDontDexToDexCompile; } const char* descriptor = dex_file.GetClassDescriptor(class_def); @@ -605,12 +606,22 @@ void CompilerDriver::PreCompile(jobject class_loader, const std::vector<const De LoadImageClasses(timings); VLOG(compiler) << "LoadImageClasses: " << GetMemoryUsageString(false); - Resolve(class_loader, dex_files, thread_pool, timings); - VLOG(compiler) << "Resolve: " << GetMemoryUsageString(false); + const bool verification_enabled = compiler_options_->IsVerificationEnabled(); + const bool never_verify = compiler_options_->NeverVerify(); - if (!compiler_options_->IsVerificationEnabled()) { + // We need to resolve for never_verify since it needs to run dex to dex to add the + // RETURN_VOID_NO_BARRIER. 
+ if (never_verify || verification_enabled) { + Resolve(class_loader, dex_files, thread_pool, timings); + VLOG(compiler) << "Resolve: " << GetMemoryUsageString(false); + } + + if (never_verify) { VLOG(compiler) << "Verify none mode specified, skipping verification."; SetVerified(class_loader, dex_files, thread_pool, timings); + } + + if (!verification_enabled) { return; } @@ -2090,6 +2101,8 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz return; } + CompilerDriver* const driver = manager->GetCompiler(); + // Can we run DEX-to-DEX compiler on this class ? DexToDexCompilationLevel dex_to_dex_compilation_level = kDontDexToDexCompile; { @@ -2097,8 +2110,8 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz StackHandleScope<1> hs(soa.Self()); Handle<mirror::ClassLoader> class_loader( hs.NewHandle(soa.Decode<mirror::ClassLoader*>(jclass_loader))); - dex_to_dex_compilation_level = GetDexToDexCompilationlevel(soa.Self(), class_loader, dex_file, - class_def); + dex_to_dex_compilation_level = driver->GetDexToDexCompilationlevel( + soa.Self(), class_loader, dex_file, class_def); } ClassDataItemIterator it(dex_file, class_data); // Skip fields @@ -2108,7 +2121,6 @@ void CompilerDriver::CompileClass(const ParallelCompilationManager* manager, siz while (it.HasNextInstanceField()) { it.Next(); } - CompilerDriver* driver = manager->GetCompiler(); bool compilation_enabled = driver->IsClassToCompile( dex_file.StringByTypeIdx(class_def.class_idx_)); diff --git a/compiler/driver/compiler_driver.h b/compiler/driver/compiler_driver.h index 28a82457cc..9463c2c9bd 100644 --- a/compiler/driver/compiler_driver.h +++ b/compiler/driver/compiler_driver.h @@ -468,6 +468,10 @@ class CompilerDriver { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: + DexToDexCompilationLevel GetDexToDexCompilationlevel( + Thread* self, Handle<mirror::ClassLoader> class_loader, const DexFile& dex_file, + const DexFile::ClassDef& class_def) 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + void PreCompile(jobject class_loader, const std::vector<const DexFile*>& dex_files, ThreadPool* thread_pool, TimingLogger* timings) LOCKS_EXCLUDED(Locks::mutator_lock_); diff --git a/compiler/driver/compiler_options.h b/compiler/driver/compiler_options.h index 5042c7594c..d06ec278ab 100644 --- a/compiler/driver/compiler_options.h +++ b/compiler/driver/compiler_options.h @@ -32,7 +32,8 @@ class CompilerOptions FINAL { public: enum CompilerFilter { kVerifyNone, // Skip verification and compile nothing except JNI stubs. - kInterpretOnly, // Compile nothing except JNI stubs. + kInterpretOnly, // Verify, and compile only JNI stubs. + kVerifyAtRuntime, // Only compile JNI stubs and verify at runtime. kSpace, // Maximize space savings. kBalanced, // Try to get the best performance return on compilation investment. kSpeed, // Maximize runtime performance. @@ -81,13 +82,23 @@ class CompilerOptions FINAL { compiler_filter_ = compiler_filter; } + bool VerifyAtRuntime() const { + return compiler_filter_ == CompilerOptions::kVerifyAtRuntime; + } + bool IsCompilationEnabled() const { - return ((compiler_filter_ != CompilerOptions::kVerifyNone) && - (compiler_filter_ != CompilerOptions::kInterpretOnly)); + return compiler_filter_ != CompilerOptions::kVerifyNone && + compiler_filter_ != CompilerOptions::kInterpretOnly && + compiler_filter_ != CompilerOptions::kVerifyAtRuntime; } bool IsVerificationEnabled() const { - return (compiler_filter_ != CompilerOptions::kVerifyNone); + return compiler_filter_ != CompilerOptions::kVerifyNone && + compiler_filter_ != CompilerOptions::kVerifyAtRuntime; + } + + bool NeverVerify() const { + return compiler_filter_ == CompilerOptions::kVerifyNone; } size_t GetHugeMethodThreshold() const { diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index bd9267c4db..968fe3e73c 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -118,6 +118,29 @@ bool 
HInliner::TryInline(HInvoke* invoke_instruction, return false; } + if (resolved_method->ShouldNotInline()) { + VLOG(compiler) << "Method " << PrettyMethod(method_index, outer_dex_file) + << " was already flagged as non inlineable"; + return false; + } + + if (!TryBuildAndInline(resolved_method, invoke_instruction, method_index)) { + resolved_method->SetShouldNotInline(); + return false; + } + + VLOG(compiler) << "Successfully inlined " << PrettyMethod(method_index, outer_dex_file); + MaybeRecordStat(kInlinedInvoke); + return true; +} + +bool HInliner::TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method, + HInvoke* invoke_instruction, + uint32_t method_index) const { + ScopedObjectAccess soa(Thread::Current()); + const DexFile::CodeItem* code_item = resolved_method->GetCodeItem(); + const DexFile& outer_dex_file = *outer_compilation_unit_.GetDexFile(); + DexCompilationUnit dex_compilation_unit( nullptr, outer_compilation_unit_.GetClassLoader(), @@ -225,8 +248,6 @@ bool HInliner::TryInline(HInvoke* invoke_instruction, // instruction id of the caller, so that new instructions added // after optimizations get a unique id. 
graph_->SetCurrentInstructionId(callee_graph->GetNextInstructionId()); - VLOG(compiler) << "Successfully inlined " << PrettyMethod(method_index, outer_dex_file); - MaybeRecordStat(kInlinedInvoke); return true; } diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h index 2b08d3d91a..1251977138 100644 --- a/compiler/optimizing/inliner.h +++ b/compiler/optimizing/inliner.h @@ -46,6 +46,9 @@ class HInliner : public HOptimization { private: bool TryInline(HInvoke* invoke_instruction, uint32_t method_index, InvokeType invoke_type) const; + bool TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method, + HInvoke* invoke_instruction, + uint32_t method_index) const; const DexCompilationUnit& outer_compilation_unit_; CompilerDriver* const compiler_driver_; diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h index e1a5afe0e7..9914ef49c3 100644 --- a/compiler/optimizing/stack_map_stream.h +++ b/compiler/optimizing/stack_map_stream.h @@ -38,6 +38,8 @@ class StackMapStream : public ValueObject { dex_register_locations_(allocator, 10 * 4), inline_infos_(allocator, 2), stack_mask_max_(-1), + dex_pc_max_(0), + native_pc_offset_max_(0), number_of_stack_maps_with_inline_info_(0) {} // Compute bytes needed to encode a mask with the given maximum element. 
@@ -92,6 +94,9 @@ class StackMapStream : public ValueObject { if (inlining_depth > 0) { number_of_stack_maps_with_inline_info_++; } + + dex_pc_max_ = std::max(dex_pc_max_, dex_pc); + native_pc_offset_max_ = std::max(native_pc_offset_max_, native_pc_offset); } void AddInlineInfoEntry(uint32_t method_index) { @@ -114,7 +119,12 @@ class StackMapStream : public ValueObject { } size_t ComputeStackMapsSize() const { - return stack_maps_.Size() * StackMap::ComputeStackMapSize(ComputeStackMaskSize()); + return stack_maps_.Size() * StackMap::ComputeStackMapSize( + ComputeStackMaskSize(), + ComputeInlineInfoSize(), + ComputeDexRegisterMapsSize(), + dex_pc_max_, + native_pc_offset_max_); } // Compute the size of the Dex register map of `entry`. @@ -165,16 +175,20 @@ class StackMapStream : public ValueObject { code_info.SetOverallSize(region.size()); size_t stack_mask_size = ComputeStackMaskSize(); - uint8_t* memory_start = region.start(); + + size_t dex_register_map_size = ComputeDexRegisterMapsSize(); + size_t inline_info_size = ComputeInlineInfoSize(); MemoryRegion dex_register_locations_region = region.Subregion( ComputeDexRegisterMapsStart(), - ComputeDexRegisterMapsSize()); + dex_register_map_size); MemoryRegion inline_infos_region = region.Subregion( ComputeInlineInfoStart(), - ComputeInlineInfoSize()); + inline_info_size); + code_info.SetEncoding( + inline_info_size, dex_register_map_size, dex_pc_max_, native_pc_offset_max_); code_info.SetNumberOfStackMaps(stack_maps_.Size()); code_info.SetStackMaskSize(stack_mask_size); DCHECK_EQ(code_info.StackMapsSize(), ComputeStackMapsSize()); @@ -185,11 +199,11 @@ class StackMapStream : public ValueObject { StackMap stack_map = code_info.GetStackMapAt(i); StackMapEntry entry = stack_maps_.Get(i); - stack_map.SetDexPc(entry.dex_pc); - stack_map.SetNativePcOffset(entry.native_pc_offset); - stack_map.SetRegisterMask(entry.register_mask); + stack_map.SetDexPc(code_info, entry.dex_pc); + stack_map.SetNativePcOffset(code_info, 
entry.native_pc_offset); + stack_map.SetRegisterMask(code_info, entry.register_mask); if (entry.sp_mask != nullptr) { - stack_map.SetStackMask(*entry.sp_mask); + stack_map.SetStackMask(code_info, *entry.sp_mask); } if (entry.num_dex_registers != 0) { @@ -200,7 +214,8 @@ class StackMapStream : public ValueObject { ComputeDexRegisterMapSize(entry)); next_dex_register_map_offset += register_region.size(); DexRegisterMap dex_register_map(register_region); - stack_map.SetDexRegisterMapOffset(register_region.start() - memory_start); + stack_map.SetDexRegisterMapOffset( + code_info, register_region.start() - dex_register_locations_region.start()); // Offset in `dex_register_map` where to store the next register entry. size_t offset = DexRegisterMap::kFixedSize; @@ -222,7 +237,7 @@ class StackMapStream : public ValueObject { // Ensure we reached the end of the Dex registers region. DCHECK_EQ(offset, register_region.size()); } else { - stack_map.SetDexRegisterMapOffset(StackMap::kNoDexRegisterMap); + stack_map.SetDexRegisterMapOffset(code_info, StackMap::kNoDexRegisterMap); } // Set the inlining info. @@ -233,7 +248,9 @@ class StackMapStream : public ValueObject { next_inline_info_offset += inline_region.size(); InlineInfo inline_info(inline_region); - stack_map.SetInlineDescriptorOffset(inline_region.start() - memory_start); + // Currently relative to the dex register map. 
+ stack_map.SetInlineDescriptorOffset( + code_info, inline_region.start() - dex_register_locations_region.start()); inline_info.SetDepth(entry.inlining_depth); for (size_t j = 0; j < entry.inlining_depth; ++j) { @@ -241,7 +258,9 @@ class StackMapStream : public ValueObject { inline_info.SetMethodReferenceIndexAtDepth(j, inline_entry.method_index); } } else { - stack_map.SetInlineDescriptorOffset(StackMap::kNoInlineInfo); + if (inline_info_size != 0) { + stack_map.SetInlineDescriptorOffset(code_info, StackMap::kNoInlineInfo); + } } } } @@ -262,6 +281,8 @@ class StackMapStream : public ValueObject { GrowableArray<DexRegisterLocation> dex_register_locations_; GrowableArray<InlineInfoEntry> inline_infos_; int stack_mask_max_; + uint32_t dex_pc_max_; + uint32_t native_pc_offset_max_; size_t number_of_stack_maps_with_inline_info_; ART_FRIEND_TEST(StackMapTest, Test1); diff --git a/compiler/optimizing/stack_map_test.cc b/compiler/optimizing/stack_map_test.cc index 87ac2e79e9..e7075c0aef 100644 --- a/compiler/optimizing/stack_map_test.cc +++ b/compiler/optimizing/stack_map_test.cc @@ -54,14 +54,14 @@ TEST(StackMapTest, Test1) { StackMap stack_map = code_info.GetStackMapAt(0); ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0))); ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64))); - ASSERT_EQ(0u, stack_map.GetDexPc()); - ASSERT_EQ(64u, stack_map.GetNativePcOffset()); - ASSERT_EQ(0x3u, stack_map.GetRegisterMask()); + ASSERT_EQ(0u, stack_map.GetDexPc(code_info)); + ASSERT_EQ(64u, stack_map.GetNativePcOffset(code_info)); + ASSERT_EQ(0x3u, stack_map.GetRegisterMask(code_info)); - MemoryRegion stack_mask = stack_map.GetStackMask(); + MemoryRegion stack_mask = stack_map.GetStackMask(code_info); ASSERT_TRUE(SameBits(stack_mask, sp_mask)); - ASSERT_TRUE(stack_map.HasDexRegisterMap()); + ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info)); DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers); ASSERT_EQ(7u, 
dex_registers.Size()); DexRegisterLocation location0 = dex_registers.GetLocationKindAndValue(0, number_of_dex_registers); @@ -73,7 +73,7 @@ TEST(StackMapTest, Test1) { ASSERT_EQ(0, location0.GetValue()); ASSERT_EQ(-2, location1.GetValue()); - ASSERT_FALSE(stack_map.HasInlineInfo()); + ASSERT_FALSE(stack_map.HasInlineInfo(code_info)); } TEST(StackMapTest, Test2) { @@ -112,14 +112,14 @@ TEST(StackMapTest, Test2) { StackMap stack_map = code_info.GetStackMapAt(0); ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(0))); ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(64))); - ASSERT_EQ(0u, stack_map.GetDexPc()); - ASSERT_EQ(64u, stack_map.GetNativePcOffset()); - ASSERT_EQ(0x3u, stack_map.GetRegisterMask()); + ASSERT_EQ(0u, stack_map.GetDexPc(code_info)); + ASSERT_EQ(64u, stack_map.GetNativePcOffset(code_info)); + ASSERT_EQ(0x3u, stack_map.GetRegisterMask(code_info)); - MemoryRegion stack_mask = stack_map.GetStackMask(); + MemoryRegion stack_mask = stack_map.GetStackMask(code_info); ASSERT_TRUE(SameBits(stack_mask, sp_mask1)); - ASSERT_TRUE(stack_map.HasDexRegisterMap()); + ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info)); DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers); ASSERT_EQ(7u, dex_registers.Size()); @@ -134,7 +134,7 @@ TEST(StackMapTest, Test2) { ASSERT_EQ(0, location0.GetValue()); ASSERT_EQ(-2, location1.GetValue()); - ASSERT_TRUE(stack_map.HasInlineInfo()); + ASSERT_TRUE(stack_map.HasInlineInfo(code_info)); InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map); ASSERT_EQ(2u, inline_info.GetDepth()); ASSERT_EQ(42u, inline_info.GetMethodReferenceIndexAtDepth(0)); @@ -146,14 +146,14 @@ TEST(StackMapTest, Test2) { StackMap stack_map = code_info.GetStackMapAt(1); ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForDexPc(1u))); ASSERT_TRUE(stack_map.Equals(code_info.GetStackMapForNativePcOffset(128u))); - ASSERT_EQ(1u, stack_map.GetDexPc()); - ASSERT_EQ(128u, 
stack_map.GetNativePcOffset()); - ASSERT_EQ(0xFFu, stack_map.GetRegisterMask()); + ASSERT_EQ(1u, stack_map.GetDexPc(code_info)); + ASSERT_EQ(128u, stack_map.GetNativePcOffset(code_info)); + ASSERT_EQ(0xFFu, stack_map.GetRegisterMask(code_info)); - MemoryRegion stack_mask = stack_map.GetStackMask(); + MemoryRegion stack_mask = stack_map.GetStackMask(code_info); ASSERT_TRUE(SameBits(stack_mask, sp_mask2)); - ASSERT_TRUE(stack_map.HasDexRegisterMap()); + ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info)); DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, number_of_dex_registers); ASSERT_EQ(3u, dex_registers.Size()); @@ -168,7 +168,7 @@ TEST(StackMapTest, Test2) { ASSERT_EQ(18, location0.GetValue()); ASSERT_EQ(3, location1.GetValue()); - ASSERT_FALSE(stack_map.HasInlineInfo()); + ASSERT_FALSE(stack_map.HasInlineInfo(code_info)); } } @@ -190,14 +190,45 @@ TEST(StackMapTest, TestNonLiveDexRegisters) { CodeInfo code_info(region); StackMap stack_map = code_info.GetStackMapAt(0); - ASSERT_TRUE(stack_map.HasDexRegisterMap()); + ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info)); DexRegisterMap dex_registers = code_info.GetDexRegisterMapOf(stack_map, 2); ASSERT_EQ(DexRegisterLocation::Kind::kNone, dex_registers.GetLocationKind(0, number_of_dex_registers)); ASSERT_EQ(DexRegisterLocation::Kind::kConstant, dex_registers.GetLocationKind(1, number_of_dex_registers)); ASSERT_EQ(-2, dex_registers.GetConstant(1, number_of_dex_registers)); - ASSERT_FALSE(stack_map.HasInlineInfo()); + ASSERT_FALSE(stack_map.HasInlineInfo(code_info)); +} + +// Generate a stack map whose dex register offset is +// StackMap::kNoDexRegisterMapSmallEncoding, and ensure we do +// not treat it as kNoDexRegisterMap. 
+TEST(StackMapTest, DexRegisterMapOffsetOverflow) { + ArenaPool pool; + ArenaAllocator arena(&pool); + StackMapStream stream(&arena); + + ArenaBitVector sp_mask(&arena, 0, false); + uint32_t number_of_dex_registers = 0xEA; + stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0); + for (uint32_t i = 0; i < number_of_dex_registers - 9; ++i) { + stream.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kConstant, 0); + } + stream.AddStackMapEntry(0, 64, 0x3, &sp_mask, number_of_dex_registers, 0); + for (uint32_t i = 0; i < number_of_dex_registers; ++i) { + stream.AddDexRegisterEntry(i, DexRegisterLocation::Kind::kConstant, 0); + } + + size_t size = stream.ComputeNeededSize(); + void* memory = arena.Alloc(size, kArenaAllocMisc); + MemoryRegion region(memory, size); + stream.FillIn(region); + + CodeInfo code_info(region); + StackMap stack_map = code_info.GetStackMapAt(1); + ASSERT_TRUE(stack_map.HasDexRegisterMap(code_info)); + ASSERT_NE(stack_map.GetDexRegisterMapOffset(code_info), StackMap::kNoDexRegisterMap); + ASSERT_EQ(stack_map.GetDexRegisterMapOffset(code_info), StackMap::kNoDexRegisterMapSmallEncoding); } } // namespace art |