Add runtime reasons for deopt.

These reasons are currently only used to help investigation. Also:
1) Log when deoptimization happens (which method deoptimized and for what reason)
2) Trace when deoptimization happens (to make it visible in systrace)

Bug: 37655083
Test: test-art-host test-art-target
Change-Id: I0c2d87b40db09e8e475cf97a7c784a034c585e97
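
The diff below threads a new DeoptimizationKind enum (declared in deoptimization_kind.h, per the nodes.h hunk) from the optimizing compiler down to the kQuickDeoptimize entrypoint. A sketch of the enum follows; only the enumerator names that appear in the hunks below are confirmed by this diff, and the declaration order plus the kLast alias are assumptions:

    enum class DeoptimizationKind {
      kAotInlineCache = 0,
      kJitInlineCache,
      kJitSameTarget,
      kLoopBoundsBCE,
      kLoopNullBCE,
      kBlockBCE,
      kCHA,
      kLast = kCHA  // Sizes the packed bit field in HDeoptimize (see the nodes.h hunk).
    };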
diff --git a/compiler/optimizing/bounds_check_elimination.cc b/compiler/optimizing/bounds_check_elimination.cc
index d38d5f8..ed630cd 100644
--- a/compiler/optimizing/bounds_check_elimination.cc
+++ b/compiler/optimizing/bounds_check_elimination.cc
@@ -1550,7 +1550,7 @@
HBasicBlock* block = GetPreHeader(loop, check);
HInstruction* cond =
new (GetGraph()->GetArena()) HEqual(array, GetGraph()->GetNullConstant());
- InsertDeoptInLoop(loop, block, cond);
+ InsertDeoptInLoop(loop, block, cond, /* is_null_check */ true);
ReplaceInstruction(check, array);
return true;
}
@@ -1616,11 +1616,16 @@
}
/** Inserts a deoptimization test in a loop preheader. */
- void InsertDeoptInLoop(HLoopInformation* loop, HBasicBlock* block, HInstruction* condition) {
+ void InsertDeoptInLoop(HLoopInformation* loop,
+ HBasicBlock* block,
+ HInstruction* condition,
+ bool is_null_check = false) {
HInstruction* suspend = loop->GetSuspendCheck();
block->InsertInstructionBefore(condition, block->GetLastInstruction());
+ DeoptimizationKind kind =
+ is_null_check ? DeoptimizationKind::kLoopNullBCE : DeoptimizationKind::kLoopBoundsBCE;
HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize(
- GetGraph()->GetArena(), condition, HDeoptimize::Kind::kBCE, suspend->GetDexPc());
+ GetGraph()->GetArena(), condition, kind, suspend->GetDexPc());
block->InsertInstructionBefore(deoptimize, block->GetLastInstruction());
if (suspend->HasEnvironment()) {
deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
@@ -1633,7 +1638,7 @@
HBasicBlock* block = bounds_check->GetBlock();
block->InsertInstructionBefore(condition, bounds_check);
HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize(
- GetGraph()->GetArena(), condition, HDeoptimize::Kind::kBCE, bounds_check->GetDexPc());
+ GetGraph()->GetArena(), condition, DeoptimizationKind::kBlockBCE, bounds_check->GetDexPc());
block->InsertInstructionBefore(deoptimize, bounds_check);
deoptimize->CopyEnvironmentFrom(bounds_check->GetEnvironment());
}
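
Note that the defaulted is_null_check parameter keeps the existing bounds-check call sites unchanged; only the null-check site above opts in. For illustration:

    InsertDeoptInLoop(loop, block, cond);                            // reports kLoopBoundsBCE
    InsertDeoptInLoop(loop, block, cond, /* is_null_check */ true);  // reports kLoopNullBCE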
diff --git a/compiler/optimizing/cha_guard_optimization.cc b/compiler/optimizing/cha_guard_optimization.cc
index 048073e..c806dbf 100644
--- a/compiler/optimizing/cha_guard_optimization.cc
+++ b/compiler/optimizing/cha_guard_optimization.cc
@@ -203,7 +203,7 @@
// Need a new deoptimize instruction that copies the environment
// of the suspend instruction for the loop.
HDeoptimize* deoptimize = new (GetGraph()->GetArena()) HDeoptimize(
- GetGraph()->GetArena(), compare, HDeoptimize::Kind::kInline, suspend->GetDexPc());
+ GetGraph()->GetArena(), compare, DeoptimizationKind::kCHA, suspend->GetDexPc());
pre_header->InsertInstructionBefore(deoptimize, pre_header->GetLastInstruction());
deoptimize->CopyEnvironmentFromWithLoopPhiAdjustment(
suspend->GetEnvironment(), loop_info->GetHeader());
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index ebd578c..d037b94 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -585,8 +585,13 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
+ LocationSummary* locations = instruction_->GetLocations();
+ SaveLiveRegisters(codegen, locations);
+ InvokeRuntimeCallingConvention calling_convention;
+ __ LoadImmediate(calling_convention.GetRegisterAt(0),
+ static_cast<uint32_t>(instruction_->AsDeoptimize()->GetDeoptimizationKind()));
arm_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickDeoptimize, void, void>();
+ CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM"; }
@@ -2672,7 +2677,10 @@
void LocationsBuilderARM::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ InvokeRuntimeCallingConvention calling_convention;
+ RegisterSet caller_saves = RegisterSet::Empty();
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetCustomSlowPathCallerSaves(caller_saves);
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
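
Every backend's deoptimization slow path now follows the same shape, distilled here from the ARM hunk above (the explanatory comments are editorial, not from the patch):

    void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
      CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
      __ Bind(GetEntryLabel());
      // The argument register may hold a live value, so spill before clobbering it.
      SaveLiveRegisters(codegen, instruction_->GetLocations());
      // Pass the reason as the single argument of the entrypoint, whose
      // signature changes from void(void) to void(DeoptimizationKind).
      InvokeRuntimeCallingConvention calling_convention;
      __ LoadImmediate(calling_convention.GetRegisterAt(0),
                       static_cast<uint32_t>(instruction_->AsDeoptimize()->GetDeoptimizationKind()));
      arm_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
      CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
    }

The matching VisitDeoptimize change registers that argument register as a custom caller-save so the register allocator knows the slow path clobbers it. The six backend diffs that follow repeat both changes, differing only in the immediate-load instruction (Mov, LoadConst32, Load32BitValue) and in how the argument register is wrapped.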
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 78b627a..0c198b1 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -587,8 +587,13 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
__ Bind(GetEntryLabel());
+ LocationSummary* locations = instruction_->GetLocations();
+ SaveLiveRegisters(codegen, locations);
+ InvokeRuntimeCallingConvention calling_convention;
+ __ Mov(calling_convention.GetRegisterAt(0),
+ static_cast<uint32_t>(instruction_->AsDeoptimize()->GetDeoptimizationKind()));
arm64_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickDeoptimize, void, void>();
+ CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM64"; }
@@ -3693,7 +3698,10 @@
void LocationsBuilderARM64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ InvokeRuntimeCallingConvention calling_convention;
+ RegisterSet caller_saves = RegisterSet::Empty();
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
+ locations->SetCustomSlowPathCallerSaves(caller_saves);
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index d65b327..b29f5a9 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -608,8 +608,14 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
+ LocationSummary* locations = instruction_->GetLocations();
+ SaveLiveRegisters(codegen, locations);
+ InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ __ Mov(calling_convention.GetRegisterAt(0),
+ static_cast<uint32_t>(instruction_->AsDeoptimize()->GetDeoptimizationKind()));
+
arm_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickDeoptimize, void, void>();
+ CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARMVIXL"; }
@@ -2704,7 +2710,10 @@
void LocationsBuilderARMVIXL::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ InvokeRuntimeCallingConventionARMVIXL calling_convention;
+ RegisterSet caller_saves = RegisterSet::Empty();
+ caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
+ locations->SetCustomSlowPathCallerSaves(caller_saves);
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index 357df97..3a7108b 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -493,8 +493,13 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
__ Bind(GetEntryLabel());
+ LocationSummary* locations = instruction_->GetLocations();
+ SaveLiveRegisters(codegen, locations);
+ InvokeRuntimeCallingConvention calling_convention;
+ __ LoadConst32(calling_convention.GetRegisterAt(0),
+ static_cast<uint32_t>(instruction_->AsDeoptimize()->GetDeoptimizationKind()));
mips_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickDeoptimize, void, void>();
+ CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }
@@ -5199,7 +5204,10 @@
void LocationsBuilderMIPS::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ InvokeRuntimeCallingConvention calling_convention;
+ RegisterSet caller_saves = RegisterSet::Empty();
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetCustomSlowPathCallerSaves(caller_saves);
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index a9c4964..f04e384 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -393,8 +393,13 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
__ Bind(GetEntryLabel());
+ LocationSummary* locations = instruction_->GetLocations();
+ SaveLiveRegisters(codegen, locations);
+ InvokeRuntimeCallingConvention calling_convention;
+ __ LoadConst32(calling_convention.GetRegisterAt(0),
+ static_cast<uint32_t>(instruction_->AsDeoptimize()->GetDeoptimizationKind()));
mips64_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickDeoptimize, void, void>();
+ CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }
@@ -3905,7 +3910,10 @@
void LocationsBuilderMIPS64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ InvokeRuntimeCallingConvention calling_convention;
+ RegisterSet caller_saves = RegisterSet::Empty();
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetCustomSlowPathCallerSaves(caller_saves);
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::RequiresRegister());
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 1e867dd..cf2d5cb 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -384,8 +384,14 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
+ LocationSummary* locations = instruction_->GetLocations();
+ SaveLiveRegisters(codegen, locations);
+ InvokeRuntimeCallingConvention calling_convention;
+ x86_codegen->Load32BitValue(
+ calling_convention.GetRegisterAt(0),
+ static_cast<uint32_t>(instruction_->AsDeoptimize()->GetDeoptimizationKind()));
x86_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickDeoptimize, void, void>();
+ CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86"; }
@@ -1688,7 +1694,10 @@
void LocationsBuilderX86::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ InvokeRuntimeCallingConvention calling_convention;
+ RegisterSet caller_saves = RegisterSet::Empty();
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetCustomSlowPathCallerSaves(caller_saves);
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::Any());
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index f413739..f2ed52b 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -397,8 +397,14 @@
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
+ LocationSummary* locations = instruction_->GetLocations();
+ SaveLiveRegisters(codegen, locations);
+ InvokeRuntimeCallingConvention calling_convention;
+ x86_64_codegen->Load32BitValue(
+ CpuRegister(calling_convention.GetRegisterAt(0)),
+ static_cast<uint32_t>(instruction_->AsDeoptimize()->GetDeoptimizationKind()));
x86_64_codegen->InvokeRuntime(kQuickDeoptimize, instruction_, instruction_->GetDexPc(), this);
- CheckEntrypointTypes<kQuickDeoptimize, void, void>();
+ CheckEntrypointTypes<kQuickDeoptimize, void, DeoptimizationKind>();
}
const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86_64"; }
@@ -1710,7 +1716,10 @@
void LocationsBuilderX86_64::VisitDeoptimize(HDeoptimize* deoptimize) {
LocationSummary* locations = new (GetGraph()->GetArena())
LocationSummary(deoptimize, LocationSummary::kCallOnSlowPath);
- locations->SetCustomSlowPathCallerSaves(RegisterSet::Empty()); // No caller-save registers.
+ InvokeRuntimeCallingConvention calling_convention;
+ RegisterSet caller_saves = RegisterSet::Empty();
+ caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
+ locations->SetCustomSlowPathCallerSaves(caller_saves);
if (IsBooleanValueOrMaterializedCondition(deoptimize->InputAt(0))) {
locations->SetInAt(0, Location::Any());
}
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 3b681c1..8674e72 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -783,7 +783,7 @@
HInstruction* compare = new (graph_->GetArena()) HNotEqual(
deopt_flag, graph_->GetIntConstant(0, dex_pc));
HInstruction* deopt = new (graph_->GetArena()) HDeoptimize(
- graph_->GetArena(), compare, HDeoptimize::Kind::kInline, dex_pc);
+ graph_->GetArena(), compare, DeoptimizationKind::kCHA, dex_pc);
if (cursor != nullptr) {
bb_cursor->InsertInstructionAfter(deopt_flag, cursor);
@@ -860,7 +860,9 @@
graph_->GetArena(),
compare,
receiver,
- HDeoptimize::Kind::kInline,
+ Runtime::Current()->IsAotCompiler()
+ ? DeoptimizationKind::kAotInlineCache
+ : DeoptimizationKind::kJitInlineCache,
invoke_instruction->GetDexPc());
bb_cursor->InsertInstructionAfter(deoptimize, compare);
deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
@@ -1147,7 +1149,7 @@
graph_->GetArena(),
compare,
receiver,
- HDeoptimize::Kind::kInline,
+ DeoptimizationKind::kJitSameTarget,
invoke_instruction->GetDexPc());
bb_cursor->InsertInstructionAfter(deoptimize, compare);
deoptimize->CopyEnvironmentFrom(invoke_instruction->GetEnvironment());
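
The old catch-all HDeoptimize::Kind::kInline is split three ways here so the runtime log can tell the guards apart: CHA guards report kCHA, inline-cache guards distinguish AOT from JIT compilation, and the JIT-only same-target guard reports kJitSameTarget. A hypothetical helper (not in the patch) summarizing the inline-cache selection done inline in the second hunk:

    // Hypothetical: centralizes the kind choice the inliner performs above.
    static DeoptimizationKind GetInlineCacheKind() {
      return Runtime::Current()->IsAotCompiler()
          ? DeoptimizationKind::kAotInlineCache   // guard emitted by the AOT compiler
          : DeoptimizationKind::kJitInlineCache;  // guard emitted by the JIT
    }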
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index f250c1a..a8bfe61 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -1422,18 +1422,6 @@
}
}
-std::ostream& operator<<(std::ostream& os, const HDeoptimize::Kind& rhs) {
- switch (rhs) {
- case HDeoptimize::Kind::kBCE:
- return os << "bce";
- case HDeoptimize::Kind::kInline:
- return os << "inline";
- default:
- LOG(FATAL) << "Unknown Deoptimization kind: " << static_cast<int>(rhs);
- UNREACHABLE();
- }
-}
-
bool HCondition::IsBeforeWhenDisregardMoves(HInstruction* instruction) const {
return this == instruction->GetPreviousDisregardingMoves();
}
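
The streaming operator for the old nested enum is deleted above; a replacement presumably lives beside DeoptimizationKind so that graph dumps and logging keep compiling. A sketch under that assumption (the exact strings and the location are not confirmed by this diff):

    inline std::ostream& operator<<(std::ostream& os, DeoptimizationKind kind) {
      switch (kind) {
        case DeoptimizationKind::kAotInlineCache: return os << "AOT inline cache";
        case DeoptimizationKind::kJitInlineCache: return os << "JIT inline cache";
        case DeoptimizationKind::kJitSameTarget:  return os << "JIT same target";
        case DeoptimizationKind::kLoopBoundsBCE:  return os << "loop bounds BCE";
        case DeoptimizationKind::kLoopNullBCE:    return os << "loop null BCE";
        case DeoptimizationKind::kBlockBCE:       return os << "block BCE";
        case DeoptimizationKind::kCHA:            return os << "CHA";
      }
      LOG(FATAL) << "Unknown deoptimization kind: " << static_cast<int>(kind);
      UNREACHABLE();
    }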
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index e40361e..b4da20b 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -30,6 +30,7 @@
#include "base/transform_array_ref.h"
#include "dex_file.h"
#include "dex_file_types.h"
+#include "deoptimization_kind.h"
#include "entrypoints/quick/quick_entrypoints_enum.h"
#include "handle.h"
#include "handle_scope.h"
@@ -2991,15 +2992,9 @@
// Deoptimize to interpreter, upon checking a condition.
class HDeoptimize FINAL : public HVariableInputSizeInstruction {
public:
- enum class Kind {
- kBCE,
- kInline,
- kLast = kInline
- };
-
// Use this constructor when the `HDeoptimize` acts as a barrier, where no code can move
// across.
- HDeoptimize(ArenaAllocator* arena, HInstruction* cond, Kind kind, uint32_t dex_pc)
+ HDeoptimize(ArenaAllocator* arena, HInstruction* cond, DeoptimizationKind kind, uint32_t dex_pc)
: HVariableInputSizeInstruction(
SideEffects::All(),
dex_pc,
@@ -3019,7 +3014,7 @@
HDeoptimize(ArenaAllocator* arena,
HInstruction* cond,
HInstruction* guard,
- Kind kind,
+ DeoptimizationKind kind,
uint32_t dex_pc)
: HVariableInputSizeInstruction(
SideEffects::CanTriggerGC(),
@@ -3043,7 +3038,7 @@
bool CanThrow() const OVERRIDE { return true; }
- Kind GetKind() const { return GetPackedField<DeoptimizeKindField>(); }
+ DeoptimizationKind GetDeoptimizationKind() const { return GetPackedField<DeoptimizeKindField>(); }
Primitive::Type GetType() const OVERRIDE {
return GuardsAnInput() ? GuardedInput()->GetType() : Primitive::kPrimVoid;
@@ -3068,18 +3063,17 @@
static constexpr size_t kFieldCanBeMoved = kNumberOfGenericPackedBits;
static constexpr size_t kFieldDeoptimizeKind = kNumberOfGenericPackedBits + 1;
static constexpr size_t kFieldDeoptimizeKindSize =
- MinimumBitsToStore(static_cast<size_t>(Kind::kLast));
+ MinimumBitsToStore(static_cast<size_t>(DeoptimizationKind::kLast));
static constexpr size_t kNumberOfDeoptimizePackedBits =
kFieldDeoptimizeKind + kFieldDeoptimizeKindSize;
static_assert(kNumberOfDeoptimizePackedBits <= kMaxNumberOfPackedBits,
"Too many packed fields.");
- using DeoptimizeKindField = BitField<Kind, kFieldDeoptimizeKind, kFieldDeoptimizeKindSize>;
+ using DeoptimizeKindField =
+ BitField<DeoptimizationKind, kFieldDeoptimizeKind, kFieldDeoptimizeKindSize>;
DISALLOW_COPY_AND_ASSIGN(HDeoptimize);
};
-std::ostream& operator<<(std::ostream& os, const HDeoptimize::Kind& rhs);
-
// Represents a should_deoptimize flag. Currently used for CHA-based devirtualization.
// The compiled code checks this flag value in a guard before devirtualized call and
// if it's true, starts to do deoptimization.
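
The packed-field arithmetic above is unchanged; only the enum type feeding it changes. A minimal standalone check of the sizing logic, with MinimumBitsToStore reimplemented for illustration and the enumerator values assumed per the sketch at the top of this change:

    #include <cstddef>

    constexpr size_t MinimumBitsToStore(size_t value) {
      size_t bits = 0;
      for (; value != 0; value >>= 1) ++bits;
      return bits;
    }
    // If kLast == kCHA == 6, three packed bits hold the kind.
    static_assert(MinimumBitsToStore(6) == 3, "seven kinds fit in 3 bits");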
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index a1016d1..029eb4b 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -190,7 +190,7 @@
HInstruction* ae = new (&allocator_) HAboveOrEqual(index, length);
block->AddInstruction(ae);
HInstruction* deoptimize =
- new(&allocator_) HDeoptimize(&allocator_, ae, HDeoptimize::Kind::kBCE, /* dex_pc */ 0u);
+ new(&allocator_) HDeoptimize(&allocator_, ae, DeoptimizationKind::kBlockBCE, /* dex_pc */ 0u);
block->AddInstruction(deoptimize);
HEnvironment* deoptimize_env = new (&allocator_) HEnvironment(&allocator_,
/* number_of_vregs */ 5,
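
The runtime half of the change is not shown in this diff: per the commit message, the kQuickDeoptimize entrypoint must now accept the kind, log it, and emit a trace event. A hedged sketch of what that plausibly looks like; everything here except the DeoptimizationKind parameter (implied by the CheckEntrypointTypes assertions above) is an assumption, including the helper names:

    extern "C" NO_RETURN void artDeoptimize(DeoptimizationKind kind, Thread* self)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      ScopedQuickEntrypointChecks sqec(self);
      // 1) Log which method deoptimizes and why (commit message point 1).
      LOG(INFO) << "Deoptimizing "
                << self->GetCurrentMethod(/* dex_pc */ nullptr)->PrettyMethod()
                << " because of " << kind;
      // 2) Make the event visible in systrace (commit message point 2).
      ScopedTrace trace("Deoptimizing method");
      artDeoptimizeImpl(self);  // Assumed pre-existing helper that performs the deopt.
    }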