Diffstat (limited to 'compiler/optimizing')
-rw-r--r-- | compiler/optimizing/code_generator_arm64.cc    |  66
-rw-r--r-- | compiler/optimizing/code_generator_arm64.h     |   1
-rw-r--r-- | compiler/optimizing/code_generator_arm_vixl.cc |  63
-rw-r--r-- | compiler/optimizing/code_generator_arm_vixl.h  |   1
-rw-r--r-- | compiler/optimizing/code_generator_x86.cc      | 114
-rw-r--r-- | compiler/optimizing/code_generator_x86.h       |   2
-rw-r--r-- | compiler/optimizing/code_generator_x86_64.cc   | 108
-rw-r--r-- | compiler/optimizing/code_generator_x86_64.h    |   1
-rw-r--r-- | compiler/optimizing/inliner.cc                 |   5
-rw-r--r-- | compiler/optimizing/instruction_builder.cc     |  11
-rw-r--r-- | compiler/optimizing/nodes.cc                   |   5
-rw-r--r-- | compiler/optimizing/nodes.h                    |  38
12 files changed, 367 insertions, 48 deletions
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 74efc9ea8d..d455614cfd 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -822,6 +822,31 @@ class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 {
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathARM64);
 };
 
+class MethodEntryExitHooksSlowPathARM64 : public SlowPathCodeARM64 {
+ public:
+  explicit MethodEntryExitHooksSlowPathARM64(HInstruction* instruction)
+      : SlowPathCodeARM64(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) override {
+    LocationSummary* locations = instruction_->GetLocations();
+    QuickEntrypointEnum entry_point =
+        (instruction_->IsMethodEntryHook()) ? kQuickMethodEntryHook : kQuickMethodExitHook;
+    CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, locations);
+    arm64_codegen->InvokeRuntime(entry_point, instruction_, instruction_->GetDexPc(), this);
+    RestoreLiveRegisters(codegen, locations);
+    __ B(GetExitLabel());
+  }
+
+  const char* GetDescription() const override {
+    return "MethodEntryExitHooksSlowPath";
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MethodEntryExitHooksSlowPathARM64);
+};
+
 #undef __
 
 Location InvokeDexCallingConventionVisitorARM64::GetNextLocation(DataType::Type type) {
@@ -1113,6 +1138,47 @@ void ParallelMoveResolverARM64::EmitMove(size_t index) {
   codegen_->MoveLocation(move->GetDestination(), move->GetSource(), DataType::Type::kVoid);
 }
 
+void LocationsBuilderARM64::VisitMethodExitHook(HMethodExitHook* method_hook) {
+  LocationSummary* locations = new (GetGraph()->GetAllocator())
+      LocationSummary(method_hook, LocationSummary::kCallOnSlowPath);
+  DataType::Type return_type = method_hook->InputAt(0)->GetType();
+  locations->SetInAt(0, ARM64ReturnLocation(return_type));
+}
+
+void InstructionCodeGeneratorARM64::GenerateMethodEntryExitHook(HInstruction* instruction) {
+  MacroAssembler* masm = GetVIXLAssembler();
+  UseScratchRegisterScope temps(masm);
+  Register temp = temps.AcquireX();
+  Register value = temps.AcquireW();
+
+  SlowPathCodeARM64* slow_path =
+      new (codegen_->GetScopedAllocator()) MethodEntryExitHooksSlowPathARM64(instruction);
+  codegen_->AddSlowPath(slow_path);
+
+  uint64_t address = reinterpret_cast64<uint64_t>(Runtime::Current()->GetInstrumentation());
+  int offset = instrumentation::Instrumentation::NeedsEntryExitHooksOffset().Int32Value();
+  __ Mov(temp, address + offset);
+  __ Ldrh(value, MemOperand(temp, 0));
+  __ Cbnz(value, slow_path->GetEntryLabel());
+  __ Bind(slow_path->GetExitLabel());
+}
+
+void InstructionCodeGeneratorARM64::VisitMethodExitHook(HMethodExitHook* instruction) {
+  DCHECK(codegen_->GetCompilerOptions().IsJitCompiler() && GetGraph()->IsDebuggable());
+  DCHECK(codegen_->RequiresCurrentMethod());
+  GenerateMethodEntryExitHook(instruction);
+}
+
+void LocationsBuilderARM64::VisitMethodEntryHook(HMethodEntryHook* method_hook) {
+  new (GetGraph()->GetAllocator()) LocationSummary(method_hook, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorARM64::VisitMethodEntryHook(HMethodEntryHook* instruction) {
+  DCHECK(codegen_->GetCompilerOptions().IsJitCompiler() && GetGraph()->IsDebuggable());
+  DCHECK(codegen_->RequiresCurrentMethod());
+  GenerateMethodEntryExitHook(instruction);
+}
+
 void CodeGeneratorARM64::MaybeIncrementHotness(bool is_frame_entry) {
   MacroAssembler* masm = GetVIXLAssembler();
   if (GetCompilerOptions().CountHotnessInCompiledCode()) {
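Note: the arm64 hunk above establishes the pattern each backend follows — the inline fast path only loads a 16-bit flag from the global Instrumentation object and branches to an out-of-line slow path when it is non-zero. Below is a minimal editorial sketch of that check in plain C++ (not part of the commit); the flag variable and the slow-path stub are illustrative stand-ins for the fields and entrypoints named in the diff.

#include <cstdint>

namespace entry_hook_sketch {

// Stand-in for the 16-bit Instrumentation field read via NeedsEntryExitHooksOffset().
uint16_t needs_entry_exit_hooks = 0;

// Stand-in for the out-of-line slow path that calls the kQuickMethodEntryHook entrypoint.
void MethodEntryHookSlowPath() {}

inline void MaybeCallMethodEntryHook() {
  // What the generated fast path does: materialize the flag's address (Mov),
  // load 16 bits (Ldrh), and branch to the slow path only when non-zero (Cbnz).
  if (needs_entry_exit_hooks != 0) {
    MethodEntryHookSlowPath();
  }
}

}  // namespace entry_hook_sketch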
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index d4546e5bd5..750151aa24 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -388,6 +388,7 @@ class InstructionCodeGeneratorARM64 : public InstructionCodeGenerator {
   void GenerateIntRemForConstDenom(HRem *instruction);
   void GenerateIntRemForPower2Denom(HRem *instruction);
   void HandleGoto(HInstruction* got, HBasicBlock* successor);
+  void GenerateMethodEntryExitHook(HInstruction* instruction);
 
   // Helpers to set up locations for vector memory operations. Returns the memory operand and,
   // if used, sets the output parameter scratch to a temporary register used in this operand,
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 700202ba20..bf0c77da57 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -971,6 +971,31 @@ class ReadBarrierForRootSlowPathARMVIXL : public SlowPathCodeARMVIXL {
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathARMVIXL);
 };
 
+class MethodEntryExitHooksSlowPathARMVIXL : public SlowPathCodeARMVIXL {
+ public:
+  explicit MethodEntryExitHooksSlowPathARMVIXL(HInstruction* instruction)
+      : SlowPathCodeARMVIXL(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) override {
+    LocationSummary* locations = instruction_->GetLocations();
+    CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
+    QuickEntrypointEnum entry_point =
+        (instruction_->IsMethodEntryHook()) ? kQuickMethodEntryHook : kQuickMethodExitHook;
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, locations);
+    arm_codegen->InvokeRuntime(entry_point, instruction_, instruction_->GetDexPc(), this);
+    RestoreLiveRegisters(codegen, locations);
+    __ B(GetExitLabel());
+  }
+
+  const char* GetDescription() const override {
+    return "MethodEntryExitHooksSlowPath";
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MethodEntryExitHooksSlowPathARMVIXL);
+};
+
 inline vixl32::Condition ARMCondition(IfCondition cond) {
   switch (cond) {
     case kCondEQ: return eq;
@@ -2111,6 +2136,44 @@ void CodeGeneratorARMVIXL::ComputeSpillMask() {
   }
 }
 
+void LocationsBuilderARMVIXL::VisitMethodExitHook(HMethodExitHook* method_hook) {
+  LocationSummary* locations = new (GetGraph()->GetAllocator())
+      LocationSummary(method_hook, LocationSummary::kCallOnSlowPath);
+  locations->SetInAt(0, parameter_visitor_.GetReturnLocation(method_hook->InputAt(0)->GetType()));
+}
+
+void InstructionCodeGeneratorARMVIXL::GenerateMethodEntryExitHook(HInstruction* instruction) {
+  UseScratchRegisterScope temps(GetVIXLAssembler());
+  vixl32::Register temp = temps.Acquire();
+
+  SlowPathCodeARMVIXL* slow_path =
+      new (codegen_->GetScopedAllocator()) MethodEntryExitHooksSlowPathARMVIXL(instruction);
+  codegen_->AddSlowPath(slow_path);
+
+  int offset = instrumentation::Instrumentation::NeedsEntryExitHooksOffset().Int32Value();
+  uint32_t address = reinterpret_cast32<uint32_t>(Runtime::Current()->GetInstrumentation());
+  __ Mov(temp, address + offset);
+  __ Ldrh(temp, MemOperand(temp, 0));
+  __ CompareAndBranchIfNonZero(temp, slow_path->GetEntryLabel());
+  __ Bind(slow_path->GetExitLabel());
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitMethodExitHook(HMethodExitHook* instruction) {
+  DCHECK(codegen_->GetCompilerOptions().IsJitCompiler() && GetGraph()->IsDebuggable());
+  DCHECK(codegen_->RequiresCurrentMethod());
+  GenerateMethodEntryExitHook(instruction);
+}
+
+void LocationsBuilderARMVIXL::VisitMethodEntryHook(HMethodEntryHook* method_hook) {
+  new (GetGraph()->GetAllocator()) LocationSummary(method_hook, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorARMVIXL::VisitMethodEntryHook(HMethodEntryHook* instruction) {
+  DCHECK(codegen_->GetCompilerOptions().IsJitCompiler() && GetGraph()->IsDebuggable());
+  DCHECK(codegen_->RequiresCurrentMethod());
+  GenerateMethodEntryExitHook(instruction);
+}
+
 void CodeGeneratorARMVIXL::MaybeIncrementHotness(bool is_frame_entry) {
   if (GetCompilerOptions().CountHotnessInCompiledCode()) {
     UseScratchRegisterScope temps(GetVIXLAssembler());
diff --git a/compiler/optimizing/code_generator_arm_vixl.h b/compiler/optimizing/code_generator_arm_vixl.h
index b797c30a39..aa40755b29 100644
--- a/compiler/optimizing/code_generator_arm_vixl.h
+++ b/compiler/optimizing/code_generator_arm_vixl.h
@@ -431,6 +431,7 @@ class InstructionCodeGeneratorARMVIXL : public InstructionCodeGenerator {
   void GenerateDivRemWithAnyConstant(HBinaryOperation* instruction);
   void GenerateDivRemConstantIntegral(HBinaryOperation* instruction);
   void HandleGoto(HInstruction* got, HBasicBlock* successor);
+  void GenerateMethodEntryExitHook(HInstruction* instruction);
 
   vixl::aarch32::MemOperand VecAddress(
       HVecMemoryOperation* instruction,
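Note: the four MethodEntryExitHooksSlowPath classes in this change share one shape; only the assembler and the branch back to the fast path differ. An editorial sketch of that common structure follows (not part of the commit — the helper functions below are placeholders, not the real SaveLiveRegisters/InvokeRuntime/RestoreLiveRegisters codegen API):

enum HookEntrypoint { kEntryHook, kExitHook };

void SaveLiveRegistersStub() {}            // spill the live registers recorded in the LocationSummary
void InvokeRuntimeStub(HookEntrypoint) {}  // call kQuickMethodEntryHook or kQuickMethodExitHook
void RestoreLiveRegistersStub() {}         // reload the spilled registers

void EmitMethodEntryExitSlowPath(bool is_entry_hook) {
  // The runtime call clobbers caller-save registers, so everything live is saved first.
  SaveLiveRegistersStub();
  // Entry and exit hooks only differ in which quick entrypoint is invoked.
  InvokeRuntimeStub(is_entry_hook ? kEntryHook : kExitHook);
  // Restore and fall back to the code after the fast-path check (B/jmp to the exit label).
  RestoreLiveRegistersStub();
}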
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index c49b08ba69..a04b4129a8 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -942,6 +942,30 @@ class ReadBarrierForRootSlowPathX86 : public SlowPathCode {
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathX86);
 };
 
+class MethodEntryExitHooksSlowPathX86 : public SlowPathCode {
+ public:
+  explicit MethodEntryExitHooksSlowPathX86(HInstruction* instruction) : SlowPathCode(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) override {
+    CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+    LocationSummary* locations = instruction_->GetLocations();
+    QuickEntrypointEnum entry_point =
+        (instruction_->IsMethodEntryHook()) ? kQuickMethodEntryHook : kQuickMethodExitHook;
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, locations);
+    x86_codegen->InvokeRuntime(entry_point, instruction_, instruction_->GetDexPc(), this);
+    RestoreLiveRegisters(codegen, locations);
+    __ jmp(GetExitLabel());
+  }
+
+  const char* GetDescription() const override {
+    return "MethodEntryExitHooksSlowPath";
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MethodEntryExitHooksSlowPathX86);
+};
+
 #undef __
 // NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
 #define __ down_cast<X86Assembler*>(GetAssembler())->  // NOLINT
@@ -1097,6 +1121,70 @@ static dwarf::Reg DWARFReg(Register reg) {
   return dwarf::Reg::X86Core(static_cast<int>(reg));
 }
 
+void SetInForReturnValue(HInstruction* ret, LocationSummary* locations) {
+  switch (ret->InputAt(0)->GetType()) {
+    case DataType::Type::kReference:
+    case DataType::Type::kBool:
+    case DataType::Type::kUint8:
+    case DataType::Type::kInt8:
+    case DataType::Type::kUint16:
+    case DataType::Type::kInt16:
+    case DataType::Type::kInt32:
+      locations->SetInAt(0, Location::RegisterLocation(EAX));
+      break;
+
+    case DataType::Type::kInt64:
+      locations->SetInAt(0, Location::RegisterPairLocation(EAX, EDX));
+      break;
+
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      locations->SetInAt(0, Location::FpuRegisterLocation(XMM0));
+      break;
+
+    case DataType::Type::kVoid:
+      locations->SetInAt(0, Location::NoLocation());
+      break;
+
+    default:
+      LOG(FATAL) << "Unknown return type " << ret->InputAt(0)->GetType();
+  }
+}
+
+void LocationsBuilderX86::VisitMethodExitHook(HMethodExitHook* method_hook) {
+  LocationSummary* locations = new (GetGraph()->GetAllocator())
+      LocationSummary(method_hook, LocationSummary::kCallOnSlowPath);
+  SetInForReturnValue(method_hook, locations);
+}
+
+void InstructionCodeGeneratorX86::GenerateMethodEntryExitHook(HInstruction* instruction) {
+  SlowPathCode* slow_path =
+      new (codegen_->GetScopedAllocator()) MethodEntryExitHooksSlowPathX86(instruction);
+  codegen_->AddSlowPath(slow_path);
+
+  uint64_t address = reinterpret_cast64<uint64_t>(Runtime::Current()->GetInstrumentation());
+  int offset = instrumentation::Instrumentation::NeedsEntryExitHooksOffset().Int32Value();
+  __ cmpw(Address::Absolute(address + offset), Immediate(0));
+  __ j(kEqual, slow_path->GetEntryLabel());
+  __ Bind(slow_path->GetExitLabel());
+}
+
+void InstructionCodeGeneratorX86::VisitMethodExitHook(HMethodExitHook* instruction) {
+  DCHECK(codegen_->GetCompilerOptions().IsJitCompiler() && GetGraph()->IsDebuggable());
+  DCHECK(codegen_->RequiresCurrentMethod());
+  GenerateMethodEntryExitHook(instruction);
+}
+
+void LocationsBuilderX86::VisitMethodEntryHook(HMethodEntryHook* method_hook) {
+  new (GetGraph()->GetAllocator()) LocationSummary(method_hook, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorX86::VisitMethodEntryHook(HMethodEntryHook* instruction) {
+  DCHECK(codegen_->GetCompilerOptions().IsJitCompiler() && GetGraph()->IsDebuggable());
+  DCHECK(codegen_->RequiresCurrentMethod());
+  GenerateMethodEntryExitHook(instruction);
+}
+
 void CodeGeneratorX86::MaybeIncrementHotness(bool is_frame_entry) {
   if (GetCompilerOptions().CountHotnessInCompiledCode()) {
     Register reg = EAX;
@@ -2408,31 +2496,7 @@ void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_UNU
 void LocationsBuilderX86::VisitReturn(HReturn* ret) {
   LocationSummary* locations =
       new (GetGraph()->GetAllocator()) LocationSummary(ret, LocationSummary::kNoCall);
-  switch (ret->InputAt(0)->GetType()) {
-    case DataType::Type::kReference:
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-      locations->SetInAt(0, Location::RegisterLocation(EAX));
-      break;
-
-    case DataType::Type::kInt64:
-      locations->SetInAt(
-          0, Location::RegisterPairLocation(EAX, EDX));
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(
-          0, Location::FpuRegisterLocation(XMM0));
-      break;
-
-    default:
-      LOG(FATAL) << "Unknown return type " << ret->InputAt(0)->GetType();
-  }
+  SetInForReturnValue(ret, locations);
 }
 
 void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) {
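Note: the new SetInForReturnValue() helper is shared by VisitReturn and VisitMethodExitHook so that the exit hook's single input is pinned to wherever the managed calling convention already leaves the return value, and no extra move is emitted before the hook runs. A small editorial sketch of the x86 mapping it encodes (illustrative types only, not the ART API):

enum class X86RetLoc { kNone, kEax, kEaxEdxPair, kXmm0 };

// ART's managed x86 convention: narrow integers and references come back in EAX,
// 64-bit integers in the EAX:EDX pair, and float/double results in XMM0.
X86RetLoc X86ReturnLocation(bool is_void, bool is_fp, bool is_wide) {
  if (is_void) return X86RetLoc::kNone;
  if (is_fp) return X86RetLoc::kXmm0;
  return is_wide ? X86RetLoc::kEaxEdxPair : X86RetLoc::kEax;
}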
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 94f010e598..75c5cebb5e 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -344,6 +344,8 @@ class InstructionCodeGeneratorX86 : public InstructionCodeGenerator {
   bool CpuHasAvxFeatureFlag();
   bool CpuHasAvx2FeatureFlag();
 
+  void GenerateMethodEntryExitHook(HInstruction* instruction);
+
   X86Assembler* const assembler_;
   CodeGeneratorX86* const codegen_;
 
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index dae2ae2b84..4ec2dd7a27 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -965,6 +965,31 @@ class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode {
   DISALLOW_COPY_AND_ASSIGN(ReadBarrierForRootSlowPathX86_64);
 };
 
+class MethodEntryExitHooksSlowPathX86_64 : public SlowPathCode {
+ public:
+  explicit MethodEntryExitHooksSlowPathX86_64(HInstruction* instruction)
+      : SlowPathCode(instruction) {}
+
+  void EmitNativeCode(CodeGenerator* codegen) override {
+    CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+    LocationSummary* locations = instruction_->GetLocations();
+    QuickEntrypointEnum entry_point =
+        (instruction_->IsMethodEntryHook()) ? kQuickMethodEntryHook : kQuickMethodExitHook;
+    __ Bind(GetEntryLabel());
+    SaveLiveRegisters(codegen, locations);
+    x86_64_codegen->InvokeRuntime(entry_point, instruction_, instruction_->GetDexPc(), this);
+    RestoreLiveRegisters(codegen, locations);
+    __ jmp(GetExitLabel());
+  }
+
+  const char* GetDescription() const override {
+    return "MethodEntryExitHooksSlowPath";
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MethodEntryExitHooksSlowPathX86_64);
+};
+
 #undef __
 // NOLINT on __ macro to suppress wrong warning/fix (misc-macro-parentheses) from clang-tidy.
 #define __ down_cast<X86_64Assembler*>(GetAssembler())->  // NOLINT
@@ -1494,6 +1519,68 @@ static dwarf::Reg DWARFReg(FloatRegister reg) {
   return dwarf::Reg::X86_64Fp(static_cast<int>(reg));
 }
 
+void LocationsBuilderX86_64::VisitMethodEntryHook(HMethodEntryHook* method_hook) {
+  new (GetGraph()->GetAllocator()) LocationSummary(method_hook, LocationSummary::kCallOnSlowPath);
+}
+
+void InstructionCodeGeneratorX86_64::GenerateMethodEntryExitHook(HInstruction* instruction) {
+  SlowPathCode* slow_path =
+      new (codegen_->GetScopedAllocator()) MethodEntryExitHooksSlowPathX86_64(instruction);
+  codegen_->AddSlowPath(slow_path);
+
+  uint64_t address = reinterpret_cast64<uint64_t>(Runtime::Current()->GetInstrumentation());
+  int offset = instrumentation::Instrumentation::NeedsEntryExitHooksOffset().Int32Value();
+  __ movq(CpuRegister(TMP), Immediate(address + offset));
+  __ cmpw(Address(CpuRegister(TMP), 0), Immediate(0));
+  __ j(kNotEqual, slow_path->GetEntryLabel());
+  __ Bind(slow_path->GetExitLabel());
+}
+
+void InstructionCodeGeneratorX86_64::VisitMethodEntryHook(HMethodEntryHook* instruction) {
+  DCHECK(codegen_->GetCompilerOptions().IsJitCompiler() && GetGraph()->IsDebuggable());
+  DCHECK(codegen_->RequiresCurrentMethod());
+  GenerateMethodEntryExitHook(instruction);
+}
+
+void SetInForReturnValue(HInstruction* instr, LocationSummary* locations) {
+  switch (instr->InputAt(0)->GetType()) {
+    case DataType::Type::kReference:
+    case DataType::Type::kBool:
+    case DataType::Type::kUint8:
+    case DataType::Type::kInt8:
+    case DataType::Type::kUint16:
+    case DataType::Type::kInt16:
+    case DataType::Type::kInt32:
+    case DataType::Type::kInt64:
+      locations->SetInAt(0, Location::RegisterLocation(RAX));
+      break;
+
+    case DataType::Type::kFloat32:
+    case DataType::Type::kFloat64:
+      locations->SetInAt(0, Location::FpuRegisterLocation(XMM0));
+      break;
+
+    case DataType::Type::kVoid:
+      locations->SetInAt(0, Location::NoLocation());
+      break;
+
+    default:
+      LOG(FATAL) << "Unexpected return type " << instr->InputAt(0)->GetType();
+  }
+}
+
+void LocationsBuilderX86_64::VisitMethodExitHook(HMethodExitHook* method_hook) {
+  LocationSummary* locations = new (GetGraph()->GetAllocator())
+      LocationSummary(method_hook, LocationSummary::kCallOnSlowPath);
+  SetInForReturnValue(method_hook, locations);
+}
+
+void InstructionCodeGeneratorX86_64::VisitMethodExitHook(HMethodExitHook* instruction) {
+  DCHECK(codegen_->GetCompilerOptions().IsJitCompiler() && GetGraph()->IsDebuggable());
+  DCHECK(codegen_->RequiresCurrentMethod());
+  GenerateMethodEntryExitHook(instruction);
+}
+
 void CodeGeneratorX86_64::MaybeIncrementHotness(bool is_frame_entry) {
   if (GetCompilerOptions().CountHotnessInCompiledCode()) {
     NearLabel overflow;
@@ -2542,26 +2629,7 @@ void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret ATTRIBUTE_
 void LocationsBuilderX86_64::VisitReturn(HReturn* ret) {
   LocationSummary* locations =
       new (GetGraph()->GetAllocator()) LocationSummary(ret, LocationSummary::kNoCall);
-  switch (ret->InputAt(0)->GetType()) {
-    case DataType::Type::kReference:
-    case DataType::Type::kBool:
-    case DataType::Type::kUint8:
-    case DataType::Type::kInt8:
-    case DataType::Type::kUint16:
-    case DataType::Type::kInt16:
-    case DataType::Type::kInt32:
-    case DataType::Type::kInt64:
-      locations->SetInAt(0, Location::RegisterLocation(RAX));
-      break;
-
-    case DataType::Type::kFloat32:
-    case DataType::Type::kFloat64:
-      locations->SetInAt(0, Location::FpuRegisterLocation(XMM0));
-      break;
-
-    default:
-      LOG(FATAL) << "Unexpected return type " << ret->InputAt(0)->GetType();
type " << ret->InputAt(0)->GetType(); - } + SetInForReturnValue(ret, locations); } void InstructionCodeGeneratorX86_64::VisitReturn(HReturn* ret) { diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h index 3e601bb97a..1115c8379d 100644 --- a/compiler/optimizing/code_generator_x86_64.h +++ b/compiler/optimizing/code_generator_x86_64.h @@ -276,6 +276,7 @@ class InstructionCodeGeneratorX86_64 : public InstructionCodeGenerator { void GenerateMinMaxInt(LocationSummary* locations, bool is_min, DataType::Type type); void GenerateMinMaxFP(LocationSummary* locations, bool is_min, DataType::Type type); void GenerateMinMax(HBinaryOperation* minmax, bool is_min); + void GenerateMethodEntryExitHook(HInstruction* instruction); // Generate a heap reference load using one register `out`: // diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc index 0e4f9ef0ed..17957d8b0f 100644 --- a/compiler/optimizing/inliner.cc +++ b/compiler/optimizing/inliner.cc @@ -812,6 +812,11 @@ void HInliner::AddCHAGuard(HInstruction* invoke_instruction, HBasicBlock* bb_cursor) { HShouldDeoptimizeFlag* deopt_flag = new (graph_->GetAllocator()) HShouldDeoptimizeFlag(graph_->GetAllocator(), dex_pc); + // ShouldDeoptimizeFlag is used to perform a deoptimization because of a CHA + // invalidation or for debugging reasons. It is OK to just check for non-zero + // value here instead of the specific CHA value. When a debugging deopt is + // requested we deoptimize before we execute any code and hence we shouldn't + // see that case here. HInstruction* compare = new (graph_->GetAllocator()) HNotEqual( deopt_flag, graph_->GetIntConstant(0, dex_pc)); HInstruction* deopt = new (graph_->GetAllocator()) HDeoptimize( diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc index 390a2bb0be..ed760f190d 100644 --- a/compiler/optimizing/instruction_builder.cc +++ b/compiler/optimizing/instruction_builder.cc @@ -372,6 +372,9 @@ bool HInstructionBuilder::Build() { if (current_block_->IsEntryBlock()) { InitializeParameters(); AppendInstruction(new (allocator_) HSuspendCheck(0u)); + if (graph_->IsDebuggable() && code_generator_->GetCompilerOptions().IsJitCompiler()) { + AppendInstruction(new (allocator_) HMethodEntryHook(0u)); + } AppendInstruction(new (allocator_) HGoto(0u)); continue; } else if (current_block_->IsExitBlock()) { @@ -822,10 +825,18 @@ void HInstructionBuilder::BuildReturn(const Instruction& instruction, compilation_stats_, MethodCompilationStat::kConstructorFenceGeneratedFinal); } + if (graph_->IsDebuggable() && code_generator_->GetCompilerOptions().IsJitCompiler()) { + // Return value is not used for void functions. We pass NullConstant to + // avoid special cases when generating code. 
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 17080f0056..24786931f2 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -2913,7 +2913,10 @@ HInstruction* HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
       } else if (current->IsCurrentMethod()) {
         replacement = outer_graph->GetCurrentMethod();
       } else {
-        DCHECK(current->IsGoto() || current->IsSuspendCheck());
+        // It is OK to ignore MethodEntryHook for inlined functions.
+        // In debug mode we don't inline and in release mode method
+        // tracing is best effort so OK to ignore them.
+        DCHECK(current->IsGoto() || current->IsSuspendCheck() || current->IsMethodEntryHook());
         entry_block_->RemoveInstruction(current);
       }
       if (replacement != nullptr) {
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6ef29bf93e..16e26dc7bc 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -21,6 +21,7 @@
 #include <array>
 #include <type_traits>
 
+#include "art_method.h"
 #include "base/arena_allocator.h"
 #include "base/arena_bit_vector.h"
 #include "base/arena_containers.h"
@@ -32,7 +33,6 @@
 #include "base/quasi_atomic.h"
 #include "base/stl_util.h"
 #include "base/transform_array_ref.h"
-#include "art_method.h"
 #include "block_namer.h"
 #include "class_root.h"
 #include "compilation_kind.h"
@@ -680,7 +680,7 @@ class HGraph : public ArenaObject<kArenaAllocGraph> {
   }
 
   bool HasShouldDeoptimizeFlag() const {
-    return number_of_cha_guards_ != 0;
+    return number_of_cha_guards_ != 0 || debuggable_;
   }
 
   bool HasTryCatch() const { return has_try_catch_; }
@@ -1530,6 +1530,8 @@ class HLoopInformationOutwardIterator : public ValueObject {
   M(LongConstant, Constant)                                             \
   M(Max, Instruction)                                                   \
   M(MemoryBarrier, Instruction)                                         \
+  M(MethodEntryHook, Instruction)                                       \
+  M(MethodExitHook, Instruction)                                        \
   M(Min, BinaryOperation)                                               \
   M(MonitorOperation, Instruction)                                      \
   M(Mul, BinaryOperation)                                               \
@@ -2994,6 +2996,38 @@ class HExpression<0> : public HInstruction {
   friend class SsaBuilder;
 };
 
+class HMethodEntryHook : public HExpression<0> {
+ public:
+  explicit HMethodEntryHook(uint32_t dex_pc)
+      : HExpression(kMethodEntryHook, SideEffects::All(), dex_pc) {}
+
+  bool NeedsEnvironment() const override {
+    return true;
+  }
+
+  DECLARE_INSTRUCTION(MethodEntryHook);
+
+ protected:
+  DEFAULT_COPY_CONSTRUCTOR(MethodEntryHook);
+};
+
+class HMethodExitHook : public HExpression<1> {
+ public:
+  HMethodExitHook(HInstruction* value, uint32_t dex_pc)
+      : HExpression(kMethodExitHook, SideEffects::All(), dex_pc) {
+    SetRawInputAt(0, value);
+  }
+
+  bool NeedsEnvironment() const override {
+    return true;
+  }
+
+  DECLARE_INSTRUCTION(MethodExitHook);
+
+ protected:
+  DEFAULT_COPY_CONSTRUCTOR(MethodExitHook);
+};
+
 // Represents dex's RETURN_VOID opcode. A HReturnVoid is a control flow
 // instruction that branches to the exit block.
 class HReturnVoid final : public HExpression<0> {
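Note: the two new M(MethodEntryHook, Instruction) / M(MethodExitHook, Instruction) entries are what wire the nodes into the rest of the optimizing compiler: in ART this list is an X-macro that, among other things, expands into the instruction-kind enumerators, the Is<Name>() helpers (e.g. the IsMethodEntryHook() used by the slow paths and the nodes.cc DCHECK above) and the visitor Visit<Name>() declarations. A self-contained editorial sketch of that pattern, with invented names:

#define FOR_EACH_DEMO_INSTRUCTION(M) \
  M(MethodEntryHook)                 \
  M(MethodExitHook)                  \
  M(Return)

// Expansion 1: one enumerator per concrete instruction kind.
enum class DemoInstructionKind {
#define DEMO_ENUM(Name) k##Name,
  FOR_EACH_DEMO_INSTRUCTION(DEMO_ENUM)
#undef DEMO_ENUM
};

// Expansion 2: one overridable Visit hook per kind, so a node added to the list
// automatically gets a default (empty) visitor method.
class DemoInstructionVisitor {
 public:
  virtual ~DemoInstructionVisitor() = default;
#define DEMO_VISIT(Name) virtual void Visit##Name() {}
  FOR_EACH_DEMO_INSTRUCTION(DEMO_VISIT)
#undef DEMO_VISIT
};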