diff options
Diffstat (limited to 'compiler')
| -rw-r--r-- | compiler/dex/quick/codegen_util.cc | 4 | ||||
| -rw-r--r-- | compiler/dex/quick/quick_cfi_test.cc | 3 | ||||
| -rw-r--r-- | compiler/dex/quick/x86/quick_assemble_x86_test.cc | 3 | ||||
| -rw-r--r-- | compiler/driver/compiler_options.cc | 9 | ||||
| -rw-r--r-- | compiler/driver/compiler_options.h | 17 | ||||
| -rw-r--r-- | compiler/elf_writer_quick.cc | 5 | ||||
| -rw-r--r-- | compiler/jit/jit_compiler.cc | 3 | ||||
| -rw-r--r-- | compiler/jni/quick/jni_compiler.cc | 2 | ||||
| -rw-r--r-- | compiler/oat_writer.cc | 3 | ||||
| -rw-r--r-- | compiler/optimizing/builder.cc | 15 | ||||
| -rw-r--r-- | compiler/optimizing/code_generator_arm.cc | 45 | ||||
| -rw-r--r-- | compiler/optimizing/code_generator_arm64.cc | 37 | ||||
| -rw-r--r-- | compiler/optimizing/code_generator_x86.cc | 38 | ||||
| -rw-r--r-- | compiler/optimizing/code_generator_x86_64.cc | 42 | ||||
| -rw-r--r-- | compiler/optimizing/nodes.cc | 15 | ||||
| -rw-r--r-- | compiler/optimizing/nodes.h | 31 | ||||
| -rw-r--r-- | compiler/optimizing/optimizing_compiler.cc | 6 | ||||
| -rw-r--r-- | compiler/optimizing/register_allocator.cc | 18 |
18 files changed, 196 insertions(+), 100 deletions(-)
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc index 86bb69d01e..dc8bf1a0cf 100644 --- a/compiler/dex/quick/codegen_util.cc +++ b/compiler/dex/quick/codegen_util.cc @@ -670,7 +670,7 @@ bool Mir2Lir::VerifyCatchEntries() { void Mir2Lir::CreateMappingTables() { - bool generate_src_map = cu_->compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols(); + bool generate_src_map = cu_->compiler_driver->GetCompilerOptions().GetGenerateDebugInfo(); uint32_t pc2dex_data_size = 0u; uint32_t pc2dex_entries = 0u; @@ -1071,7 +1071,7 @@ Mir2Lir::Mir2Lir(CompilationUnit* cu, MIRGraph* mir_graph, ArenaAllocator* arena pc_rel_temp_(nullptr), dex_cache_arrays_min_offset_(std::numeric_limits<uint32_t>::max()), cfi_(&last_lir_insn_, - cu->compiler_driver->GetCompilerOptions().GetIncludeCFI(), + cu->compiler_driver->GetCompilerOptions().GetGenerateDebugInfo(), arena), in_to_reg_storage_mapping_(arena) { switch_tables_.reserve(4); diff --git a/compiler/dex/quick/quick_cfi_test.cc b/compiler/dex/quick/quick_cfi_test.cc index b3c73557a7..87bbe14040 100644 --- a/compiler/dex/quick/quick_cfi_test.cc +++ b/compiler/dex/quick/quick_cfi_test.cc @@ -59,8 +59,7 @@ class QuickCFITest : public CFITest { false, CompilerOptions::kDefaultTopKProfileThreshold, false, - true, // include_debug_symbols. - true, // include_cfi + true, // generate_debug_info. 
false, false, false, diff --git a/compiler/dex/quick/x86/quick_assemble_x86_test.cc b/compiler/dex/quick/x86/quick_assemble_x86_test.cc index f58f206af5..798e23fbac 100644 --- a/compiler/dex/quick/x86/quick_assemble_x86_test.cc +++ b/compiler/dex/quick/x86/quick_assemble_x86_test.cc @@ -42,8 +42,7 @@ class QuickAssembleX86TestBase : public testing::Test { false, CompilerOptions::kDefaultTopKProfileThreshold, false, - false, - false, + CompilerOptions::kDefaultGenerateDebugInfo, false, false, false, diff --git a/compiler/driver/compiler_options.cc b/compiler/driver/compiler_options.cc index c5fc98a957..226e6b7952 100644 --- a/compiler/driver/compiler_options.cc +++ b/compiler/driver/compiler_options.cc @@ -30,8 +30,7 @@ CompilerOptions::CompilerOptions() include_patch_information_(kDefaultIncludePatchInformation), top_k_profile_threshold_(kDefaultTopKProfileThreshold), debuggable_(false), - include_debug_symbols_(kDefaultIncludeDebugSymbols), - include_cfi_(false), + generate_debug_info_(kDefaultGenerateDebugInfo), implicit_null_checks_(true), implicit_so_checks_(true), implicit_suspend_checks_(false), @@ -56,8 +55,7 @@ CompilerOptions::CompilerOptions(CompilerFilter compiler_filter, bool include_patch_information, double top_k_profile_threshold, bool debuggable, - bool include_debug_symbols, - bool include_cfi, + bool generate_debug_info, bool implicit_null_checks, bool implicit_so_checks, bool implicit_suspend_checks, @@ -76,8 +74,7 @@ CompilerOptions::CompilerOptions(CompilerFilter compiler_filter, include_patch_information_(include_patch_information), top_k_profile_threshold_(top_k_profile_threshold), debuggable_(debuggable), - include_debug_symbols_(include_debug_symbols), - include_cfi_(include_cfi), + generate_debug_info_(generate_debug_info), implicit_null_checks_(implicit_null_checks), implicit_so_checks_(implicit_so_checks), implicit_suspend_checks_(implicit_suspend_checks), diff --git a/compiler/driver/compiler_options.h 
b/compiler/driver/compiler_options.h index bf3f8ec08a..356663bd8a 100644 --- a/compiler/driver/compiler_options.h +++ b/compiler/driver/compiler_options.h @@ -49,7 +49,7 @@ class CompilerOptions FINAL { static const size_t kDefaultTinyMethodThreshold = 20; static const size_t kDefaultNumDexMethodsThreshold = 900; static constexpr double kDefaultTopKProfileThreshold = 90.0; - static const bool kDefaultIncludeDebugSymbols = kIsDebugBuild; + static const bool kDefaultGenerateDebugInfo = kIsDebugBuild; static const bool kDefaultIncludePatchInformation = false; CompilerOptions(); @@ -64,8 +64,7 @@ class CompilerOptions FINAL { bool include_patch_information, double top_k_profile_threshold, bool debuggable, - bool include_debug_symbols, - bool include_cfi, + bool generate_debug_info, bool implicit_null_checks, bool implicit_so_checks, bool implicit_suspend_checks, @@ -146,13 +145,8 @@ class CompilerOptions FINAL { return debuggable_; } - bool GetIncludeDebugSymbols() const { - return include_debug_symbols_; - } - - bool GetIncludeCFI() const { - // include-debug-symbols implies include-cfi. - return include_cfi_ || include_debug_symbols_; + bool GetGenerateDebugInfo() const { + return generate_debug_info_; } bool GetImplicitNullChecks() const { @@ -212,8 +206,7 @@ class CompilerOptions FINAL { // When using a profile file only the top K% of the profiled samples will be compiled. 
const double top_k_profile_threshold_; const bool debuggable_; - const bool include_debug_symbols_; - const bool include_cfi_; + const bool generate_debug_info_; const bool implicit_null_checks_; const bool implicit_so_checks_; const bool implicit_suspend_checks_; diff --git a/compiler/elf_writer_quick.cc b/compiler/elf_writer_quick.cc index 3f0a80bc99..dce1e861b4 100644 --- a/compiler/elf_writer_quick.cc +++ b/compiler/elf_writer_quick.cc @@ -192,7 +192,8 @@ bool ElfWriterQuick<ElfTypes>::Write( std::unique_ptr<RawSection> debug_line_oat_patches(new RawSection( ".debug_line.oat_patches", SHT_OAT_PATCH)); if (!oat_writer->GetMethodDebugInfo().empty()) { - if (compiler_driver_->GetCompilerOptions().GetIncludeCFI()) { + if (compiler_driver_->GetCompilerOptions().GetGenerateDebugInfo()) { + // Generate CFI (stack unwinding information). if (kCFIFormat == dwarf::DW_EH_FRAME_FORMAT) { dwarf::WriteCFISection( compiler_driver_, oat_writer, @@ -213,8 +214,6 @@ bool ElfWriterQuick<ElfTypes>::Write( debug_frame_oat_patches->GetBuffer()); builder->RegisterSection(debug_frame_oat_patches.get()); } - } - if (compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols()) { // Add methods to .symtab. WriteDebugSymbols(builder.get(), oat_writer); // Generate DWARF .debug_* sections. diff --git a/compiler/jit/jit_compiler.cc b/compiler/jit/jit_compiler.cc index 7ed70971a3..e28f8f0418 100644 --- a/compiler/jit/jit_compiler.cc +++ b/compiler/jit/jit_compiler.cc @@ -74,8 +74,7 @@ JitCompiler::JitCompiler() : total_time_(0) { false, CompilerOptions::kDefaultTopKProfileThreshold, false, // TODO: Think about debuggability of JIT-compiled code. 
- false, - false, + CompilerOptions::kDefaultGenerateDebugInfo, false, false, false, diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc index a06303d23e..573c088aba 100644 --- a/compiler/jni/quick/jni_compiler.cc +++ b/compiler/jni/quick/jni_compiler.cc @@ -94,7 +94,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, // Assembler that holds generated instructions std::unique_ptr<Assembler> jni_asm(Assembler::Create(instruction_set)); - jni_asm->cfi().SetEnabled(driver->GetCompilerOptions().GetIncludeCFI()); + jni_asm->cfi().SetEnabled(driver->GetCompilerOptions().GetGenerateDebugInfo()); // Offsets into data structures // TODO: if cross compiling these offsets are for the host not the target diff --git a/compiler/oat_writer.cc b/compiler/oat_writer.cc index 745cdcfaab..8f153b1905 100644 --- a/compiler/oat_writer.cc +++ b/compiler/oat_writer.cc @@ -449,8 +449,7 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor { } } - if (writer_->compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols() || - writer_->compiler_driver_->GetCompilerOptions().GetIncludeCFI()) { + if (writer_->compiler_driver_->GetCompilerOptions().GetGenerateDebugInfo()) { // Record debug information for this function if we are doing that. 
const uint32_t quick_code_start = quick_code_offset - writer_->oat_header_->GetExecutableOffset() - thumb_offset; diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc index c4f033df52..41c1d2cf1b 100644 --- a/compiler/optimizing/builder.cc +++ b/compiler/optimizing/builder.cc @@ -712,7 +712,11 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction, } else { clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit; HLoadClass* load_class = new (arena_) HLoadClass( - storage_index, *dex_compilation_unit_->GetDexFile(), is_referrer_class, dex_pc); + graph_->GetCurrentMethod(), + storage_index, + *dex_compilation_unit_->GetDexFile(), + is_referrer_class, + dex_pc); current_block_->AddInstruction(load_class); clinit_check = new (arena_) HClinitCheck(load_class, dex_pc); current_block_->AddInstruction(clinit_check); @@ -915,8 +919,11 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction, *outer_compilation_unit_->GetDexFile(), storage_index); bool is_initialized = resolved_field->GetDeclaringClass()->IsInitialized() && is_in_dex_cache; - HLoadClass* constant = new (arena_) HLoadClass( - storage_index, *dex_compilation_unit_->GetDexFile(), is_referrer_class, dex_pc); + HLoadClass* constant = new (arena_) HLoadClass(graph_->GetCurrentMethod(), + storage_index, + *dex_compilation_unit_->GetDexFile(), + is_referrer_class, + dex_pc); current_block_->AddInstruction(constant); HInstruction* cls = constant; @@ -1156,6 +1163,7 @@ bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction, } HInstruction* object = LoadLocal(reference, Primitive::kPrimNot); HLoadClass* cls = new (arena_) HLoadClass( + graph_->GetCurrentMethod(), type_index, *dex_compilation_unit_->GetDexFile(), IsOutermostCompilingClass(type_index), @@ -2171,6 +2179,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32 return false; } current_block_->AddInstruction(new (arena_) HLoadClass( + 
graph_->GetCurrentMethod(), type_index, *dex_compilation_unit_->GetDexFile(), IsOutermostCompilingClass(type_index), diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 92dc54df5d..ab3b6b0b70 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -41,6 +41,7 @@ static bool ExpectedPairLayout(Location location) { } static constexpr int kCurrentMethodStackOffset = 0; +static constexpr Register kMethodRegisterArgument = R0; // We unconditionally allocate R5 to ensure we can do long operations // with baseline. @@ -544,7 +545,7 @@ void CodeGeneratorARM::GenerateFrameEntry() { uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR; __ PushList(push_mask); __ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask)); - __ cfi().RelOffsetForMany(DWARFReg(R0), 0, push_mask, kArmWordSize); + __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, push_mask, kArmWordSize); if (fpu_spill_mask_ != 0) { SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_)); __ vpushs(start_register, POPCOUNT(fpu_spill_mask_)); @@ -554,7 +555,7 @@ void CodeGeneratorARM::GenerateFrameEntry() { int adjust = GetFrameSize() - FrameEntrySpillSize(); __ AddConstant(SP, -adjust); __ cfi().AdjustCFAOffset(adjust); - __ StoreToOffset(kStoreWord, R0, SP, 0); + __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0); } void CodeGeneratorARM::GenerateFrameExit() { @@ -803,11 +804,11 @@ void CodeGeneratorARM::Move64(Location destination, Location source) { void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) { LocationSummary* locations = instruction->GetLocations(); - if (locations != nullptr && locations->Out().Equals(location)) { + if (instruction->IsCurrentMethod()) { + Move32(location, Location::StackSlot(kCurrentMethodStackOffset)); + } else if (locations != nullptr && locations->Out().Equals(location)) { return; 
- } - - if (locations != nullptr && locations->Out().IsConstant()) { + } else if (locations != nullptr && locations->Out().IsConstant()) { HConstant* const_to_move = locations->Out().GetConstant(); if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) { int32_t value = GetInt32ValueOf(const_to_move); @@ -1286,7 +1287,7 @@ void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirec void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall); - locations->AddTemp(Location::RegisterLocation(R0)); + locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument)); InvokeDexCallingConventionVisitorARM calling_convention_visitor; for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) { @@ -2802,9 +2803,19 @@ void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) { locations->SetOut(location); } -void InstructionCodeGeneratorARM::VisitParameterValue(HParameterValue* instruction) { +void InstructionCodeGeneratorARM::VisitParameterValue( + HParameterValue* instruction ATTRIBUTE_UNUSED) { // Nothing to do, the parameter is already at its location. - UNUSED(instruction); +} + +void LocationsBuilderARM::VisitCurrentMethod(HCurrentMethod* instruction) { + LocationSummary* locations = + new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument)); +} + +void InstructionCodeGeneratorARM::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) { + // Nothing to do, the method is already at its location. 
} void LocationsBuilderARM::VisitNot(HNot* not_) { @@ -3954,21 +3965,25 @@ void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) { : LocationSummary::kNoCall; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); + locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); } void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) { - Register out = cls->GetLocations()->Out().AsRegister<Register>(); + LocationSummary* locations = cls->GetLocations(); + Register out = locations->Out().AsRegister<Register>(); + Register current_method = locations->InAt(0).AsRegister<Register>(); if (cls->IsReferrersClass()) { DCHECK(!cls->CanCallRuntime()); DCHECK(!cls->MustGenerateClinitCheck()); - codegen_->LoadCurrentMethod(out); - __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()); + __ LoadFromOffset( + kLoadWord, out, current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value()); } else { DCHECK(cls->CanCallRuntime()); - codegen_->LoadCurrentMethod(out); - __ LoadFromOffset( - kLoadWord, out, out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()); + __ LoadFromOffset(kLoadWord, + out, + current_method, + mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()); __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())); SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM( diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc index ced60cd33f..04c38f6df2 100644 --- a/compiler/optimizing/code_generator_arm64.cc +++ b/compiler/optimizing/code_generator_arm64.cc @@ -634,16 +634,16 @@ void CodeGeneratorARM64::Move(HInstruction* instruction, Location location, HInstruction* move_for) { LocationSummary* locations = instruction->GetLocations(); - if (locations != nullptr && locations->Out().Equals(location)) { - 
return; - } - Primitive::Type type = instruction->GetType(); DCHECK_NE(type, Primitive::kPrimVoid); - if (instruction->IsIntConstant() - || instruction->IsLongConstant() - || instruction->IsNullConstant()) { + if (instruction->IsCurrentMethod()) { + MoveLocation(location, Location::StackSlot(kCurrentMethodStackOffset)); + } else if (locations != nullptr && locations->Out().Equals(location)) { + return; + } else if (instruction->IsIntConstant() + || instruction->IsLongConstant() + || instruction->IsNullConstant()) { int64_t value = GetInt64ValueOf(instruction->AsConstant()); if (location.IsRegister()) { Register dst = RegisterFrom(location, type); @@ -2345,20 +2345,20 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) { LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath : LocationSummary::kNoCall; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); + locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); } void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) { Register out = OutputRegister(cls); + Register current_method = InputRegisterAt(cls, 0); if (cls->IsReferrersClass()) { DCHECK(!cls->CanCallRuntime()); DCHECK(!cls->MustGenerateClinitCheck()); - codegen_->LoadCurrentMethod(out); - __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset())); + __ Ldr(out, HeapOperand(current_method, mirror::ArtMethod::DeclaringClassOffset())); } else { DCHECK(cls->CanCallRuntime()); - codegen_->LoadCurrentMethod(out); - __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset())); + __ Ldr(out, HeapOperand(current_method, mirror::ArtMethod::DexCacheResolvedTypesOffset())); __ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()))); SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64( @@ -2674,9 +2674,20 @@ void 
LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) { locations->SetOut(location); } -void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) { +void InstructionCodeGeneratorARM64::VisitParameterValue( + HParameterValue* instruction ATTRIBUTE_UNUSED) { // Nothing to do, the parameter is already at its location. - UNUSED(instruction); +} + +void LocationsBuilderARM64::VisitCurrentMethod(HCurrentMethod* instruction) { + LocationSummary* locations = + new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetOut(LocationFrom(x0)); +} + +void InstructionCodeGeneratorARM64::VisitCurrentMethod( + HCurrentMethod* instruction ATTRIBUTE_UNUSED) { + // Nothing to do, the method is already at its location. } void LocationsBuilderARM64::VisitPhi(HPhi* instruction) { diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index 419730077f..1688efcee9 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -36,6 +36,7 @@ namespace art { namespace x86 { static constexpr int kCurrentMethodStackOffset = 0; +static constexpr Register kMethodRegisterArgument = EAX; static constexpr Register kCoreCalleeSaves[] = { EBP, ESI, EDI }; @@ -498,7 +499,7 @@ void CodeGeneratorX86::GenerateFrameEntry() { int adjust = GetFrameSize() - FrameEntrySpillSize(); __ subl(ESP, Immediate(adjust)); __ cfi().AdjustCFAOffset(adjust); - __ movl(Address(ESP, kCurrentMethodStackOffset), EAX); + __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument); } void CodeGeneratorX86::GenerateFrameExit() { @@ -717,11 +718,11 @@ void CodeGeneratorX86::Move64(Location destination, Location source) { void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstruction* move_for) { LocationSummary* locations = instruction->GetLocations(); - if (locations != nullptr && locations->Out().Equals(location)) 
{ + if (instruction->IsCurrentMethod()) { + Move32(location, Location::StackSlot(kCurrentMethodStackOffset)); + } else if (locations != nullptr && locations->Out().Equals(location)) { return; - } - - if (locations != nullptr && locations->Out().IsConstant()) { + } else if (locations != nullptr && locations->Out().IsConstant()) { HConstant* const_to_move = locations->Out().GetConstant(); if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) { Immediate imm(GetInt32ValueOf(const_to_move)); @@ -1239,7 +1240,7 @@ void LocationsBuilderX86::VisitInvokeVirtual(HInvokeVirtual* invoke) { void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall); - locations->AddTemp(Location::RegisterLocation(EAX)); + locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument)); InvokeDexCallingConventionVisitorX86 calling_convention_visitor; for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) { @@ -3012,8 +3013,17 @@ void LocationsBuilderX86::VisitParameterValue(HParameterValue* instruction) { locations->SetOut(location); } -void InstructionCodeGeneratorX86::VisitParameterValue(HParameterValue* instruction) { - UNUSED(instruction); +void InstructionCodeGeneratorX86::VisitParameterValue( + HParameterValue* instruction ATTRIBUTE_UNUSED) { +} + +void LocationsBuilderX86::VisitCurrentMethod(HCurrentMethod* instruction) { + LocationSummary* locations = + new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument)); +} + +void InstructionCodeGeneratorX86::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) { } void LocationsBuilderX86::VisitNot(HNot* not_) { @@ -4281,20 +4291,22 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) { : LocationSummary::kNoCall; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, 
call_kind); + locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); } void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) { - Register out = cls->GetLocations()->Out().AsRegister<Register>(); + LocationSummary* locations = cls->GetLocations(); + Register out = locations->Out().AsRegister<Register>(); + Register current_method = locations->InAt(0).AsRegister<Register>(); if (cls->IsReferrersClass()) { DCHECK(!cls->CanCallRuntime()); DCHECK(!cls->MustGenerateClinitCheck()); - codegen_->LoadCurrentMethod(out); - __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value())); + __ movl(out, Address(current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value())); } else { DCHECK(cls->CanCallRuntime()); - codegen_->LoadCurrentMethod(out); - __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value())); + __ movl(out, Address( + current_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value())); __ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()))); SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86( diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index 7491aa25d0..78477016b4 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -39,13 +39,13 @@ namespace x86_64 { static constexpr Register TMP = R11; static constexpr int kCurrentMethodStackOffset = 0; +static constexpr Register kMethodRegisterArgument = RDI; static constexpr Register kCoreCalleeSaves[] = { RBX, RBP, R12, R13, R14, R15 }; static constexpr FloatRegister kFpuCalleeSaves[] = { XMM12, XMM13, XMM14, XMM15 }; static constexpr int kC2ConditionMask = 0x400; - #define __ reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler())-> class NullCheckSlowPathX86_64 : public SlowPathCodeX86_64 { @@ -545,7 +545,8 @@ void 
CodeGeneratorX86_64::GenerateFrameEntry() { } } - __ movl(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI)); + __ movl(Address(CpuRegister(RSP), kCurrentMethodStackOffset), + CpuRegister(kMethodRegisterArgument)); } void CodeGeneratorX86_64::GenerateFrameExit() { @@ -689,11 +690,11 @@ void CodeGeneratorX86_64::Move(HInstruction* instruction, Location location, HInstruction* move_for) { LocationSummary* locations = instruction->GetLocations(); - if (locations != nullptr && locations->Out().Equals(location)) { + if (instruction->IsCurrentMethod()) { + Move(location, Location::StackSlot(kCurrentMethodStackOffset)); + } else if (locations != nullptr && locations->Out().Equals(location)) { return; - } - - if (locations != nullptr && locations->Out().IsConstant()) { + } else if (locations != nullptr && locations->Out().IsConstant()) { HConstant* const_to_move = locations->Out().GetConstant(); if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) { Immediate imm(GetInt32ValueOf(const_to_move)); @@ -1339,7 +1340,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDi void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) { LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall); - locations->AddTemp(Location::RegisterLocation(RDI)); + locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument)); InvokeDexCallingConventionVisitorX86_64 calling_convention_visitor; for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) { @@ -3070,9 +3071,20 @@ void LocationsBuilderX86_64::VisitParameterValue(HParameterValue* instruction) { locations->SetOut(location); } -void InstructionCodeGeneratorX86_64::VisitParameterValue(HParameterValue* instruction) { +void InstructionCodeGeneratorX86_64::VisitParameterValue( + HParameterValue* instruction ATTRIBUTE_UNUSED) { // Nothing to do, the parameter is already at its location. 
- UNUSED(instruction); +} + +void LocationsBuilderX86_64::VisitCurrentMethod(HCurrentMethod* instruction) { + LocationSummary* locations = + new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall); + locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument)); +} + +void InstructionCodeGeneratorX86_64::VisitCurrentMethod( + HCurrentMethod* instruction ATTRIBUTE_UNUSED) { + // Nothing to do, the method is already at its location. } void LocationsBuilderX86_64::VisitNot(HNot* not_) { @@ -4127,20 +4139,22 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) { : LocationSummary::kNoCall; LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind); + locations->SetInAt(0, Location::RequiresRegister()); locations->SetOut(Location::RequiresRegister()); } void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) { - CpuRegister out = cls->GetLocations()->Out().AsRegister<CpuRegister>(); + LocationSummary* locations = cls->GetLocations(); + CpuRegister out = locations->Out().AsRegister<CpuRegister>(); + CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>(); if (cls->IsReferrersClass()) { DCHECK(!cls->CanCallRuntime()); DCHECK(!cls->MustGenerateClinitCheck()); - codegen_->LoadCurrentMethod(out); - __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value())); + __ movl(out, Address(current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value())); } else { DCHECK(cls->CanCallRuntime()); - codegen_->LoadCurrentMethod(out); - __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value())); + __ movl(out, Address( + current_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value())); __ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()))); SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64( cls, cls, cls->GetDexPc(), 
cls->MustGenerateClinitCheck()); diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc index 483c09e5a9..80d4b4a863 100644 --- a/compiler/optimizing/nodes.cc +++ b/compiler/optimizing/nodes.cc @@ -295,6 +295,19 @@ HNullConstant* HGraph::GetNullConstant() { return cached_null_constant_; } +HCurrentMethod* HGraph::GetCurrentMethod() { + if (cached_current_method_ == nullptr) { + cached_current_method_ = new (arena_) HCurrentMethod(); + if (entry_block_->GetFirstInstruction() == nullptr) { + entry_block_->AddInstruction(cached_current_method_); + } else { + entry_block_->InsertInstructionBefore( + cached_current_method_, entry_block_->GetFirstInstruction()); + } + } + return cached_current_method_; +} + HConstant* HGraph::GetConstant(Primitive::Type type, int64_t value) { switch (type) { case Primitive::Type::kPrimBoolean: @@ -1461,6 +1474,8 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) { DCHECK(parameter_index != last_input_index); } current->ReplaceWith(invoke->InputAt(parameter_index++)); + } else if (current->IsCurrentMethod()) { + current->ReplaceWith(outer_graph->GetCurrentMethod()); } else { DCHECK(current->IsGoto() || current->IsSuspendCheck()); entry_block_->RemoveInstruction(current); diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h index a44d745c11..869809d69f 100644 --- a/compiler/optimizing/nodes.h +++ b/compiler/optimizing/nodes.h @@ -35,6 +35,7 @@ namespace art { class GraphChecker; class HBasicBlock; +class HCurrentMethod; class HDoubleConstant; class HEnvironment; class HFloatConstant; @@ -147,7 +148,8 @@ class HGraph : public ArenaObject<kArenaAllocMisc> { cached_int_constants_(std::less<int32_t>(), arena->Adapter()), cached_float_constants_(std::less<int32_t>(), arena->Adapter()), cached_long_constants_(std::less<int64_t>(), arena->Adapter()), - cached_double_constants_(std::less<int64_t>(), arena->Adapter()) {} + cached_double_constants_(std::less<int64_t>(), arena->Adapter()), + 
cached_current_method_(nullptr) {} ArenaAllocator* GetArena() const { return arena_; } const GrowableArray<HBasicBlock*>& GetBlocks() const { return blocks_; } @@ -278,6 +280,8 @@ class HGraph : public ArenaObject<kArenaAllocMisc> { return CreateConstant(bit_cast<int64_t, double>(value), &cached_double_constants_); } + HCurrentMethod* GetCurrentMethod(); + HBasicBlock* FindCommonDominator(HBasicBlock* first, HBasicBlock* second) const; const DexFile& GetDexFile() const { @@ -386,6 +390,8 @@ class HGraph : public ArenaObject<kArenaAllocMisc> { ArenaSafeMap<int64_t, HLongConstant*> cached_long_constants_; ArenaSafeMap<int64_t, HDoubleConstant*> cached_double_constants_; + HCurrentMethod* cached_current_method_; + friend class SsaBuilder; // For caching constants. friend class SsaLivenessAnalysis; // For the linear order. ART_FRIEND_TEST(GraphTest, IfSuccessorSimpleJoinBlock1); @@ -811,6 +817,7 @@ class HLoopInformationOutwardIterator : public ValueObject { M(ClinitCheck, Instruction) \ M(Compare, BinaryOperation) \ M(Condition, BinaryOperation) \ + M(CurrentMethod, Instruction) \ M(Deoptimize, Instruction) \ M(Div, BinaryOperation) \ M(DivZeroCheck, Instruction) \ @@ -1824,6 +1831,19 @@ class HDeoptimize : public HTemplateInstruction<1> { DISALLOW_COPY_AND_ASSIGN(HDeoptimize); }; +// Represents the ArtMethod that was passed as a first argument to +// the method. It is used by instructions that depend on it, like +// instructions that work with the dex cache. +class HCurrentMethod : public HExpression<0> { + public: + HCurrentMethod() : HExpression(Primitive::kPrimNot, SideEffects::None()) {} + + DECLARE_INSTRUCTION(CurrentMethod); + + private: + DISALLOW_COPY_AND_ASSIGN(HCurrentMethod); +}; + class HUnaryOperation : public HExpression<1> { public: HUnaryOperation(Primitive::Type result_type, HInstruction* input) @@ -3437,9 +3457,10 @@ class HSuspendCheck : public HTemplateInstruction<0> { /** * Instruction to load a Class object. 
*/ -class HLoadClass : public HExpression<0> { +class HLoadClass : public HExpression<1> { public: - HLoadClass(uint16_t type_index, + HLoadClass(HCurrentMethod* current_method, + uint16_t type_index, const DexFile& dex_file, bool is_referrers_class, uint32_t dex_pc) @@ -3449,7 +3470,9 @@ class HLoadClass : public HExpression<0> { is_referrers_class_(is_referrers_class), dex_pc_(dex_pc), generate_clinit_check_(false), - loaded_class_rti_(ReferenceTypeInfo::CreateTop(/* is_exact */ false)) {} + loaded_class_rti_(ReferenceTypeInfo::CreateTop(/* is_exact */ false)) { + SetRawInputAt(0, current_method); + } bool CanBeMoved() const OVERRIDE { return true; } diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc index fa3c310811..3123843b7f 100644 --- a/compiler/optimizing/optimizing_compiler.cc +++ b/compiler/optimizing/optimizing_compiler.cc @@ -401,7 +401,7 @@ CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph, codegen->CompileOptimized(&allocator); DefaultSrcMap src_mapping_table; - if (compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols()) { + if (compiler_driver->GetCompilerOptions().GetGenerateDebugInfo()) { codegen->BuildSourceMap(&src_mapping_table); } @@ -438,7 +438,7 @@ CompiledMethod* OptimizingCompiler::CompileBaseline( std::vector<uint8_t> mapping_table; codegen->BuildMappingTable(&mapping_table); DefaultSrcMap src_mapping_table; - if (compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols()) { + if (compiler_driver->GetCompilerOptions().GetGenerateDebugInfo()) { codegen->BuildSourceMap(&src_mapping_table); } std::vector<uint8_t> vmap_table; @@ -534,7 +534,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite return nullptr; } codegen->GetAssembler()->cfi().SetEnabled( - compiler_driver->GetCompilerOptions().GetIncludeCFI()); + compiler_driver->GetCompilerOptions().GetGenerateDebugInfo()); PassInfoPrinter pass_info_printer(graph, method_name.c_str(), 
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc index 925099ade6..d4ff4d8dee 100644 --- a/compiler/optimizing/register_allocator.cc +++ b/compiler/optimizing/register_allocator.cc @@ -482,8 +482,9 @@ bool RegisterAllocator::ValidateIntervals(const GrowableArray<LiveInterval*>& in LiveInterval* current = it.CurrentInterval(); HInstruction* defined_by = current->GetParent()->GetDefinedBy(); if (current->GetParent()->HasSpillSlot() - // Parameters have their own stack slot. - && !(defined_by != nullptr && defined_by->IsParameterValue())) { + // Parameters and current method have their own stack slot. + && !(defined_by != nullptr && (defined_by->IsParameterValue() + || defined_by->IsCurrentMethod()))) { BitVector* liveness_of_spill_slot = liveness_of_values.Get(number_of_registers + current->GetParent()->GetSpillSlot() / kVRegSize - number_of_out_slots); @@ -1246,6 +1247,11 @@ void RegisterAllocator::AllocateSpillSlotFor(LiveInterval* interval) { return; } + if (defined_by->IsCurrentMethod()) { + parent->SetSpillSlot(0); + return; + } + if (defined_by->IsConstant()) { // Constants don't need a spill slot. return; @@ -1519,7 +1525,10 @@ void RegisterAllocator::InsertMoveAfter(HInstruction* instruction, void RegisterAllocator::ConnectSiblings(LiveInterval* interval) { LiveInterval* current = interval; - if (current->HasSpillSlot() && current->HasRegister()) { + if (current->HasSpillSlot() + && current->HasRegister() + // Currently, we unconditionally spill the current method in the code generators. + && !interval->GetDefinedBy()->IsCurrentMethod()) { // We spill eagerly, so move must be at definition.
InsertMoveAfter(interval->GetDefinedBy(), interval->ToLocation(), @@ -1715,6 +1724,9 @@ void RegisterAllocator::Resolve() { } else if (current->HasSpillSlot()) { current->SetSpillSlot(current->GetSpillSlot() + codegen_->GetFrameSize()); } + } else if (instruction->IsCurrentMethod()) { + // The current method is always at offset 0. + DCHECK(!current->HasSpillSlot() || (current->GetSpillSlot() == 0)); } else if (current->HasSpillSlot()) { // Adjust the stack slot, now that we know the number of them for each type. // The way this implementation lays out the stack is the following: |