Diffstat (limited to 'compiler/optimizing')
 compiler/optimizing/bounds_check_elimination.h                  |   5
 compiler/optimizing/cha_guard_optimization.h                    |   5
 compiler/optimizing/cloner_test.cc                              | 185
 compiler/optimizing/code_generator.cc                           |   4
 compiler/optimizing/code_generator_arm64.cc                     | 101
 compiler/optimizing/code_generator_arm_vixl.cc                  |  86
 compiler/optimizing/code_generator_mips.cc                      | 131
 compiler/optimizing/code_generator_mips64.cc                    | 131
 compiler/optimizing/code_generator_x86.cc                       |  17
 compiler/optimizing/code_generator_x86_64.cc                    |  15
 compiler/optimizing/code_sinking.h                              |   6
 compiler/optimizing/constant_folding.cc                         |   2
 compiler/optimizing/constructor_fence_redundancy_elimination.h  |   7
 compiler/optimizing/data_type.h                                 |  13
 compiler/optimizing/induction_var_analysis.cc                   | 244
 compiler/optimizing/induction_var_analysis.h                    |  15
 compiler/optimizing/inliner.cc                                  |  76
 compiler/optimizing/inliner.h                                   |   5
 compiler/optimizing/instruction_builder.cc                      |   4
 compiler/optimizing/instruction_simplifier.cc                   |  15
 compiler/optimizing/instruction_simplifier_arm.cc               |   4
 compiler/optimizing/instruction_simplifier_arm64.cc             |   4
 compiler/optimizing/instruction_simplifier_mips.cc              |   4
 compiler/optimizing/instruction_simplifier_mips.h               |   2
 compiler/optimizing/intrinsics.cc                               |  61
 compiler/optimizing/intrinsics.h                                |  12
 compiler/optimizing/licm.h                                      |   7
 compiler/optimizing/load_store_analysis.h                       |   4
 compiler/optimizing/load_store_elimination.h                    |   5
 compiler/optimizing/loop_optimization.cc                        |   5
 compiler/optimizing/loop_optimization.h                         |   3
 compiler/optimizing/nodes.cc                                    |  45
 compiler/optimizing/nodes.h                                     | 554
 compiler/optimizing/nodes_mips.h                                |  13
 compiler/optimizing/nodes_shared.h                              |  23
 compiler/optimizing/nodes_vector.h                              | 127
 compiler/optimizing/nodes_x86.h                                 |  17
 compiler/optimizing/optimization.cc                             | 312
 compiler/optimizing/optimization.h                              |  79
 compiler/optimizing/optimizing_compiler.cc                      | 706
 compiler/optimizing/optimizing_compiler_stats.h                 | 136
 compiler/optimizing/pc_relative_fixups_mips.h                   |   2
 compiler/optimizing/scheduler.h                                 |   9
 compiler/optimizing/select_generator.cc                         |   5
 compiler/optimizing/select_generator.h                          |   3
 compiler/optimizing/sharpening.h                                |   5
 46 files changed, 1882 insertions(+), 1332 deletions(-)
diff --git a/compiler/optimizing/bounds_check_elimination.h b/compiler/optimizing/bounds_check_elimination.h
index 6dc53207ea..79c67a8c7a 100644
--- a/compiler/optimizing/bounds_check_elimination.h
+++ b/compiler/optimizing/bounds_check_elimination.h
@@ -28,8 +28,9 @@ class BoundsCheckElimination : public HOptimization {
public:
BoundsCheckElimination(HGraph* graph,
const SideEffectsAnalysis& side_effects,
- HInductionVarAnalysis* induction_analysis)
- : HOptimization(graph, kBoundsCheckEliminationPassName),
+ HInductionVarAnalysis* induction_analysis,
+ const char* name = kBoundsCheckEliminationPassName)
+ : HOptimization(graph, name),
side_effects_(side_effects),
induction_analysis_(induction_analysis) {}
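
The same change recurs in nearly every pass header touched by this diff: each HOptimization subclass gains an optional `name` constructor parameter defaulting to the pass's canonical pass-name constant, so the same optimization can be instantiated more than once in a pipeline under distinct names (the rename of the generic kPassName to the pass-specific kCFREPassName in constructor_fence_redundancy_elimination.h below serves the same goal of unambiguous pass names). A minimal sketch of the resulting usage, assuming an arena allocator and a hypothetical suffix convention for repeated runs:

  // First instance keeps the default kBoundsCheckEliminationPassName.
  HOptimization* bce = new (allocator) BoundsCheckElimination(
      graph, side_effects, induction_analysis);
  // A second run of the same pass gets a distinct, log-friendly name.
  HOptimization* bce_repeat = new (allocator) BoundsCheckElimination(
      graph, side_effects, induction_analysis, "BCE$after_inlining");
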
diff --git a/compiler/optimizing/cha_guard_optimization.h b/compiler/optimizing/cha_guard_optimization.h
index ba0cdb81fd..f14e07bd6c 100644
--- a/compiler/optimizing/cha_guard_optimization.h
+++ b/compiler/optimizing/cha_guard_optimization.h
@@ -26,8 +26,9 @@ namespace art {
*/
class CHAGuardOptimization : public HOptimization {
public:
- explicit CHAGuardOptimization(HGraph* graph)
- : HOptimization(graph, kCHAGuardOptimizationPassName) {}
+ explicit CHAGuardOptimization(HGraph* graph,
+ const char* name = kCHAGuardOptimizationPassName)
+ : HOptimization(graph, name) {}
void Run() OVERRIDE;
diff --git a/compiler/optimizing/cloner_test.cc b/compiler/optimizing/cloner_test.cc
new file mode 100644
index 0000000000..d34dd81767
--- /dev/null
+++ b/compiler/optimizing/cloner_test.cc
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2017 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "graph_checker.h"
+#include "nodes.h"
+#include "optimizing_unit_test.h"
+
+#include "gtest/gtest.h"
+
+namespace art {
+
+// This class provides methods and helpers for testing various cloning and copying routines:
+// individual instruction cloning and cloning of more coarse-grained structures.
+class ClonerTest : public OptimizingUnitTest {
+ public:
+ ClonerTest()
+ : graph_(CreateGraph()), entry_block_(nullptr), exit_block_(nullptr), parameter_(nullptr) {}
+
+ void CreateBasicLoopControlFlow(/* out */ HBasicBlock** header_p,
+ /* out */ HBasicBlock** body_p) {
+ entry_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ graph_->AddBlock(entry_block_);
+ graph_->SetEntryBlock(entry_block_);
+
+ HBasicBlock* loop_preheader = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* loop_header = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* loop_body = new (GetAllocator()) HBasicBlock(graph_);
+ HBasicBlock* loop_exit = new (GetAllocator()) HBasicBlock(graph_);
+
+ graph_->AddBlock(loop_preheader);
+ graph_->AddBlock(loop_header);
+ graph_->AddBlock(loop_body);
+ graph_->AddBlock(loop_exit);
+
+ exit_block_ = new (GetAllocator()) HBasicBlock(graph_);
+ graph_->AddBlock(exit_block_);
+ graph_->SetExitBlock(exit_block_);
+
+ entry_block_->AddSuccessor(loop_preheader);
+ loop_preheader->AddSuccessor(loop_header);
+ // Loop exit first to have a proper exit condition/target for HIf.
+ loop_header->AddSuccessor(loop_exit);
+ loop_header->AddSuccessor(loop_body);
+ loop_body->AddSuccessor(loop_header);
+ loop_exit->AddSuccessor(exit_block_);
+
+ *header_p = loop_header;
+ *body_p = loop_body;
+
+ parameter_ = new (GetAllocator()) HParameterValue(graph_->GetDexFile(),
+ dex::TypeIndex(0),
+ 0,
+ DataType::Type::kInt32);
+ entry_block_->AddInstruction(parameter_);
+ loop_exit->AddInstruction(new (GetAllocator()) HReturnVoid());
+ exit_block_->AddInstruction(new (GetAllocator()) HExit());
+ }
+
+ void CreateBasicLoopDataFlow(HBasicBlock* loop_header, HBasicBlock* loop_body) {
+ uint32_t dex_pc = 0;
+
+ // Entry block.
+ HIntConstant* const_0 = graph_->GetIntConstant(0);
+ HIntConstant* const_1 = graph_->GetIntConstant(1);
+ HIntConstant* const_128 = graph_->GetIntConstant(128);
+
+ // Header block.
+ HPhi* phi = new (GetAllocator()) HPhi(GetAllocator(), 0, 0, DataType::Type::kInt32);
+ HInstruction* suspend_check = new (GetAllocator()) HSuspendCheck();
+
+ loop_header->AddPhi(phi);
+ loop_header->AddInstruction(suspend_check);
+ loop_header->AddInstruction(new (GetAllocator()) HGreaterThanOrEqual(phi, const_128));
+ loop_header->AddInstruction(new (GetAllocator()) HIf(parameter_));
+
+ // Loop body block.
+ HInstruction* null_check = new (GetAllocator()) HNullCheck(parameter_, dex_pc);
+ HInstruction* array_length = new (GetAllocator()) HArrayLength(null_check, dex_pc);
+ HInstruction* bounds_check = new (GetAllocator()) HBoundsCheck(phi, array_length, dex_pc);
+ HInstruction* array_get =
+ new (GetAllocator()) HArrayGet(null_check, bounds_check, DataType::Type::kInt32, dex_pc);
+ HInstruction* add = new (GetAllocator()) HAdd(DataType::Type::kInt32, array_get, const_1);
+ HInstruction* array_set =
+ new (GetAllocator()) HArraySet(null_check, bounds_check, add, DataType::Type::kInt32, dex_pc);
+ HInstruction* induction_inc = new (GetAllocator()) HAdd(DataType::Type::kInt32, phi, const_1);
+
+ loop_body->AddInstruction(null_check);
+ loop_body->AddInstruction(array_length);
+ loop_body->AddInstruction(bounds_check);
+ loop_body->AddInstruction(array_get);
+ loop_body->AddInstruction(add);
+ loop_body->AddInstruction(array_set);
+ loop_body->AddInstruction(induction_inc);
+ loop_body->AddInstruction(new (GetAllocator()) HGoto());
+
+ phi->AddInput(const_0);
+ phi->AddInput(induction_inc);
+
+ graph_->SetHasBoundsChecks(true);
+
+ // Set up an HEnvironment for each instruction that requires one.
+ ArenaVector<HInstruction*> current_locals({phi, const_128, parameter_},
+ GetAllocator()->Adapter(kArenaAllocInstruction));
+
+ HEnvironment* env = ManuallyBuildEnvFor(suspend_check, &current_locals);
+ null_check->CopyEnvironmentFrom(env);
+ bounds_check->CopyEnvironmentFrom(env);
+ }
+
+ HEnvironment* ManuallyBuildEnvFor(HInstruction* instruction,
+ ArenaVector<HInstruction*>* current_locals) {
+ HEnvironment* environment = new (GetAllocator()) HEnvironment(
+ GetAllocator(),
+ current_locals->size(),
+ graph_->GetArtMethod(),
+ instruction->GetDexPc(),
+ instruction);
+
+ environment->CopyFrom(ArrayRef<HInstruction* const>(*current_locals));
+ instruction->SetRawEnvironment(environment);
+ return environment;
+ }
+
+ bool CheckGraph() {
+ GraphChecker checker(graph_);
+ checker.Run();
+ if (!checker.IsValid()) {
+ for (const std::string& error : checker.GetErrors()) {
+ std::cout << error << std::endl;
+ }
+ return false;
+ }
+ return true;
+ }
+
+ HGraph* graph_;
+
+ HBasicBlock* entry_block_;
+ HBasicBlock* exit_block_;
+
+ HInstruction* parameter_;
+};
+
+TEST_F(ClonerTest, IndividualInstrCloner) {
+ HBasicBlock* header = nullptr;
+ HBasicBlock* loop_body = nullptr;
+
+ CreateBasicLoopControlFlow(&header, &loop_body);
+ CreateBasicLoopDataFlow(header, loop_body);
+ graph_->BuildDominatorTree();
+ ASSERT_TRUE(CheckGraph());
+
+ HSuspendCheck* old_suspend_check = header->GetLoopInformation()->GetSuspendCheck();
+ CloneAndReplaceInstructionVisitor visitor(graph_);
+ // Do instruction cloning and replacement twice, with different visiting orders.
+
+ visitor.VisitInsertionOrder();
+ size_t instr_replaced_by_clones_count = visitor.GetInstrReplacedByClonesCount();
+ EXPECT_EQ(instr_replaced_by_clones_count, 12u);
+ EXPECT_TRUE(CheckGraph());
+
+ visitor.VisitReversePostOrder();
+ instr_replaced_by_clones_count = visitor.GetInstrReplacedByClonesCount();
+ EXPECT_EQ(instr_replaced_by_clones_count, 24u);
+ EXPECT_TRUE(CheckGraph());
+
+ HSuspendCheck* new_suspend_check = header->GetLoopInformation()->GetSuspendCheck();
+ EXPECT_NE(new_suspend_check, old_suspend_check);
+ EXPECT_NE(new_suspend_check, nullptr);
+}
+
+} // namespace art
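
For reference, CreateBasicLoopControlFlow above builds the following CFG (successor order matters: loop_exit is added as the header's first successor so that it is the taken target of the header's HIf):

  entry_block_ --> loop_preheader --> loop_header --> loop_exit --> exit_block_
                                        ^       |
                                        |       v
                                        +--- loop_body
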
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 0bd3ce937a..aff6f9f64f 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1411,10 +1411,10 @@ LocationSummary* CodeGenerator::CreateThrowingSlowPathLocations(HInstruction* in
void CodeGenerator::GenerateNullCheck(HNullCheck* instruction) {
if (compiler_options_.GetImplicitNullChecks()) {
- MaybeRecordStat(stats_, kImplicitNullCheckGenerated);
+ MaybeRecordStat(stats_, MethodCompilationStat::kImplicitNullCheckGenerated);
GenerateImplicitNullCheck(instruction);
} else {
- MaybeRecordStat(stats_, kExplicitNullCheckGenerated);
+ MaybeRecordStat(stats_, MethodCompilationStat::kExplicitNullCheckGenerated);
GenerateExplicitNullCheck(instruction);
}
}
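
The stats arguments are now written with qualified enumerator names. A plausible reading, and an assumption here since the enum's definition lives in optimizing_compiler_stats.h (changed elsewhere in this diff), is that MethodCompilationStat is turned into a scoped enumeration, which makes the bare spellings ill-formed:

  // Sketch, assuming the enum is (or becomes) scoped:
  enum class MethodCompilationStat {
    kImplicitNullCheckGenerated,
    kExplicitNullCheckGenerated,
    // ...
  };
  // With `enum class`, unqualified kImplicitNullCheckGenerated no longer
  // compiles; each use site must spell out MethodCompilationStat::... as
  // the MaybeRecordStat() calls above now do.
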
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index a0cb43ee01..5054a299d3 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -311,40 +311,23 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
LoadClassSlowPathARM64(HLoadClass* cls,
HInstruction* at,
uint32_t dex_pc,
- bool do_clinit,
- vixl::aarch64::Register bss_entry_temp = vixl::aarch64::Register(),
- vixl::aarch64::Label* bss_entry_adrp_label = nullptr)
+ bool do_clinit)
: SlowPathCodeARM64(at),
cls_(cls),
dex_pc_(dex_pc),
- do_clinit_(do_clinit),
- bss_entry_temp_(bss_entry_temp),
- bss_entry_adrp_label_(bss_entry_adrp_label) {
+ do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
- constexpr bool call_saves_everything_except_r0_ip0 = (!kUseReadBarrier || kUseBakerReadBarrier);
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- InvokeRuntimeCallingConvention calling_convention;
- // For HLoadClass/kBssEntry/kSaveEverything, the page address of the entry is in a temp
- // register, make sure it's not clobbered by the call or by saving/restoring registers.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
- if (is_load_class_bss_entry) {
- DCHECK(bss_entry_temp_.IsValid());
- DCHECK(!bss_entry_temp_.Is(calling_convention.GetRegisterAt(0)));
- DCHECK(
- !UseScratchRegisterScope(arm64_codegen->GetVIXLAssembler()).IsAvailable(bss_entry_temp_));
- }
-
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
+ InvokeRuntimeCallingConvention calling_convention;
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ Mov(calling_convention.GetRegisterAt(0).W(), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -363,26 +346,6 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
arm64_codegen->MoveLocation(out, calling_convention.GetReturnLocation(type), type);
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- if (is_load_class_bss_entry) {
- DCHECK(out.IsValid());
- const DexFile& dex_file = cls_->GetDexFile();
- if (call_saves_everything_except_r0_ip0) {
- // The class entry page address was preserved in bss_entry_temp_ thanks to kSaveEverything.
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the class entry page.
- bss_entry_adrp_label_ = arm64_codegen->NewBssEntryTypePatch(dex_file, type_index);
- arm64_codegen->EmitAdrpPlaceholder(bss_entry_adrp_label_, bss_entry_temp_);
- }
- vixl::aarch64::Label* strp_label =
- arm64_codegen->NewBssEntryTypePatch(dex_file, type_index, bss_entry_adrp_label_);
- {
- SingleEmissionCheckScope guard(arm64_codegen->GetVIXLAssembler());
- __ Bind(strp_label);
- __ str(RegisterFrom(locations->Out(), DataType::Type::kReference),
- MemOperand(bss_entry_temp_, /* offset placeholder */ 0));
- }
- }
__ B(GetExitLabel());
}
@@ -398,34 +361,23 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 {
// Whether to initialize the class.
const bool do_clinit_;
- // For HLoadClass/kBssEntry, the temp register and the label of the ADRP where it was loaded.
- vixl::aarch64::Register bss_entry_temp_;
- vixl::aarch64::Label* bss_entry_adrp_label_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM64);
};
class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
public:
- LoadStringSlowPathARM64(HLoadString* instruction, Register temp, vixl::aarch64::Label* adrp_label)
- : SlowPathCodeARM64(instruction),
- temp_(temp),
- adrp_label_(adrp_label) {}
+ explicit LoadStringSlowPathARM64(HLoadString* instruction)
+ : SlowPathCodeARM64(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
- InvokeRuntimeCallingConvention calling_convention;
- // Make sure `temp_` is not clobbered by the call or by saving/restoring registers.
- DCHECK(temp_.IsValid());
- DCHECK(!temp_.Is(calling_convention.GetRegisterAt(0)));
- DCHECK(!UseScratchRegisterScope(arm64_codegen->GetVIXLAssembler()).IsAvailable(temp_));
-
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
+ InvokeRuntimeCallingConvention calling_convention;
const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
__ Mov(calling_convention.GetRegisterAt(0).W(), string_index.index_);
arm64_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
@@ -435,33 +387,12 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 {
RestoreLiveRegisters(codegen, locations);
- // Store the resolved String to the BSS entry.
- const DexFile& dex_file = instruction_->AsLoadString()->GetDexFile();
- if (!kUseReadBarrier || kUseBakerReadBarrier) {
- // The string entry page address was preserved in temp_ thanks to kSaveEverything.
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the string entry page.
- adrp_label_ = arm64_codegen->NewStringBssEntryPatch(dex_file, string_index);
- arm64_codegen->EmitAdrpPlaceholder(adrp_label_, temp_);
- }
- vixl::aarch64::Label* strp_label =
- arm64_codegen->NewStringBssEntryPatch(dex_file, string_index, adrp_label_);
- {
- SingleEmissionCheckScope guard(arm64_codegen->GetVIXLAssembler());
- __ Bind(strp_label);
- __ str(RegisterFrom(locations->Out(), DataType::Type::kReference),
- MemOperand(temp_, /* offset placeholder */ 0));
- }
-
__ B(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; }
private:
- const Register temp_;
- vixl::aarch64::Label* adrp_label_;
-
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64);
};
@@ -4883,7 +4814,6 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
if (cls->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- locations->AddTemp(FixedTempLocation());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
@@ -4910,8 +4840,6 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
Location out_loc = cls->GetLocations()->Out();
Register out = OutputRegister(cls);
- Register bss_entry_temp;
- vixl::aarch64::Label* bss_entry_adrp_label = nullptr;
const ReadBarrierOption read_barrier_option = cls->IsInBootImage()
? kWithoutReadBarrier
@@ -4975,16 +4903,16 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
// Add ADRP with its PC-relative Class .bss entry patch.
const DexFile& dex_file = cls->GetDexFile();
dex::TypeIndex type_index = cls->GetTypeIndex();
- bss_entry_temp = XRegisterFrom(cls->GetLocations()->GetTemp(0));
- bss_entry_adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index);
- codegen_->EmitAdrpPlaceholder(bss_entry_adrp_label, bss_entry_temp);
+ vixl::aarch64::Register temp = XRegisterFrom(out_loc);
+ vixl::aarch64::Label* adrp_label = codegen_->NewBssEntryTypePatch(dex_file, type_index);
+ codegen_->EmitAdrpPlaceholder(adrp_label, temp);
// Add LDR with its PC-relative Class patch.
vixl::aarch64::Label* ldr_label =
- codegen_->NewBssEntryTypePatch(dex_file, type_index, bss_entry_adrp_label);
+ codegen_->NewBssEntryTypePatch(dex_file, type_index, adrp_label);
// /* GcRoot<mirror::Class> */ out = *(base_address + offset) /* PC-relative */
GenerateGcRootFieldLoad(cls,
out_loc,
- bss_entry_temp,
+ temp,
/* offset placeholder */ 0u,
ldr_label,
read_barrier_option);
@@ -5013,7 +4941,7 @@ void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) NO_THREAD_SA
if (generate_null_check || do_clinit) {
DCHECK(cls->CanCallRuntime());
SlowPathCodeARM64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathARM64(
- cls, cls, cls->GetDexPc(), do_clinit, bss_entry_temp, bss_entry_adrp_label);
+ cls, cls, cls->GetDexPc(), do_clinit);
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Cbz(out, slow_path->GetEntryLabel());
@@ -5078,7 +5006,6 @@ void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
if (load->GetLoadKind() == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
- locations->AddTemp(FixedTempLocation());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0).GetCode()));
@@ -5138,7 +5065,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
const DexFile& dex_file = load->GetDexFile();
const dex::StringIndex string_index = load->GetStringIndex();
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- Register temp = XRegisterFrom(load->GetLocations()->GetTemp(0));
+ Register temp = XRegisterFrom(out_loc);
vixl::aarch64::Label* adrp_label = codegen_->NewStringBssEntryPatch(dex_file, string_index);
codegen_->EmitAdrpPlaceholder(adrp_label, temp);
// Add LDR with its .bss entry String patch.
@@ -5152,7 +5079,7 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) NO_THREAD
ldr_label,
kCompilerReadBarrierOption);
SlowPathCodeARM64* slow_path =
- new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load, temp, adrp_label);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathARM64(load);
codegen_->AddSlowPath(slow_path);
__ Cbz(out.X(), slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
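
The ARM64 changes above, and the matching ARM, MIPS and MIPS64 changes below, share one theme: the kBssEntry slow paths for HLoadClass/HLoadString no longer write the resolved Class/String back to the .bss entry themselves, so the temp register and ADRP label (or high-half patch info) that previously had to survive the runtime call are gone, and the entry's page address is materialized directly into the output register. A schematic of the simplification; where the .bss entry gets written now is an assumption (presumably in the runtime entrypoints), since the diff only shows the store's removal from compiled code:

  // Before:                            // After:
  //   adrp temp, :bss:entry            //   adrp out, :bss:entry
  //   ldr  out, [temp, #offset]        //   ldr  out, [out, #offset]
  //   cbz  out, slow_path              //   cbz  out, slow_path
  // slow path:                         // slow path:
  //   call runtime;                    //   call runtime only; no .bss
  //   str result, [temp, #offset]      //   store, no temp live across call
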
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 9e7455d488..3f8f0c44f3 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -532,29 +532,12 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
- constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
- vixl32::Register entry_address;
- if (is_load_class_bss_entry && call_saves_everything_except_r0) {
- vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
- // In the unlucky case that the `temp` is R0, we preserve the address in `out` across
- // the kSaveEverything call.
- bool temp_is_r0 = temp.Is(calling_convention.GetRegisterAt(0));
- entry_address = temp_is_r0 ? RegisterFrom(out) : temp;
- DCHECK(!entry_address.Is(calling_convention.GetRegisterAt(0)));
- if (temp_is_r0) {
- __ Mov(entry_address, temp);
- }
- }
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ Mov(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -566,22 +549,6 @@ class LoadClassSlowPathARMVIXL : public SlowPathCodeARMVIXL {
CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- if (is_load_class_bss_entry) {
- if (call_saves_everything_except_r0) {
- // The class entry address was preserved in `entry_address` thanks to kSaveEverything.
- __ Str(r0, MemOperand(entry_address));
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the string entry.
- UseScratchRegisterScope temps(
- down_cast<CodeGeneratorARMVIXL*>(codegen)->GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- arm_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- arm_codegen->EmitMovwMovtPlaceholder(labels, temp);
- __ Str(r0, MemOperand(temp));
- }
- }
// Move the class to the desired location.
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
@@ -616,48 +583,17 @@ class LoadStringSlowPathARMVIXL : public SlowPathCodeARMVIXL {
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- HLoadString* load = instruction_->AsLoadString();
- const dex::StringIndex string_index = load->GetStringIndex();
- vixl32::Register out = OutputRegister(load);
- constexpr bool call_saves_everything_except_r0 = (!kUseReadBarrier || kUseBakerReadBarrier);
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
CodeGeneratorARMVIXL* arm_codegen = down_cast<CodeGeneratorARMVIXL*>(codegen);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
InvokeRuntimeCallingConventionARMVIXL calling_convention;
- // In the unlucky case that the `temp` is R0, we preserve the address in `out` across
- // the kSaveEverything call.
- vixl32::Register entry_address;
- if (call_saves_everything_except_r0) {
- vixl32::Register temp = RegisterFrom(locations->GetTemp(0));
- bool temp_is_r0 = (temp.Is(calling_convention.GetRegisterAt(0)));
- entry_address = temp_is_r0 ? out : temp;
- DCHECK(!entry_address.Is(calling_convention.GetRegisterAt(0)));
- if (temp_is_r0) {
- __ Mov(entry_address, temp);
- }
- }
-
__ Mov(calling_convention.GetRegisterAt(0), string_index.index_);
arm_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- // Store the resolved String to the .bss entry.
- if (call_saves_everything_except_r0) {
- // The string entry address was preserved in `entry_address` thanks to kSaveEverything.
- __ Str(r0, MemOperand(entry_address));
- } else {
- // For non-Baker read barrier, we need to re-calculate the address of the string entry.
- UseScratchRegisterScope temps(
- down_cast<CodeGeneratorARMVIXL*>(codegen)->GetVIXLAssembler());
- vixl32::Register temp = temps.Acquire();
- CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
- arm_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index);
- arm_codegen->EmitMovwMovtPlaceholder(labels, temp);
- __ Str(r0, MemOperand(temp));
- }
-
arm_codegen->Move32(locations->Out(), LocationFrom(r0));
RestoreLiveRegisters(codegen, locations);
@@ -7104,9 +7040,6 @@ void LocationsBuilderARMVIXL::VisitLoadClass(HLoadClass* cls) {
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- // Note that IP may be clobbered by saving/restoring the live register (only one thanks
- // to the custom calling convention) or by marking, so we request a different temp.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConventionARMVIXL calling_convention;
caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -7189,13 +7122,10 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadClass(HLoadClass* cls) NO_THREAD_
break;
}
case HLoadClass::LoadKind::kBssEntry: {
- vixl32::Register temp = (!kUseReadBarrier || kUseBakerReadBarrier)
- ? RegisterFrom(locations->GetTemp(0))
- : out;
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
- codegen_->EmitMovwMovtPlaceholder(labels, temp);
- GenerateGcRootFieldLoad(cls, out_loc, temp, /* offset */ 0, read_barrier_option);
+ codegen_->EmitMovwMovtPlaceholder(labels, out);
+ GenerateGcRootFieldLoad(cls, out_loc, out, /* offset */ 0, read_barrier_option);
generate_null_check = true;
break;
}
@@ -7296,9 +7226,6 @@ void LocationsBuilderARMVIXL::VisitLoadString(HLoadString* load) {
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need, including temps.
- // Note that IP may be clobbered by saving/restoring the live register (only one thanks
- // to the custom calling convention) or by marking, so we request a different temp.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConventionARMVIXL calling_convention;
caller_saves.Add(LocationFrom(calling_convention.GetRegisterAt(0)));
@@ -7348,13 +7275,10 @@ void InstructionCodeGeneratorARMVIXL::VisitLoadString(HLoadString* load) NO_THRE
}
case HLoadString::LoadKind::kBssEntry: {
DCHECK(!codegen_->GetCompilerOptions().IsBootImage());
- vixl32::Register temp = (!kUseReadBarrier || kUseBakerReadBarrier)
- ? RegisterFrom(locations->GetTemp(0))
- : out;
CodeGeneratorARMVIXL::PcRelativePatchInfo* labels =
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
- codegen_->EmitMovwMovtPlaceholder(labels, temp);
- GenerateGcRootFieldLoad(load, out_loc, temp, /* offset */ 0, kCompilerReadBarrierOption);
+ codegen_->EmitMovwMovtPlaceholder(labels, out);
+ GenerateGcRootFieldLoad(load, out_loc, out, /* offset */ 0, kCompilerReadBarrierOption);
LoadStringSlowPathARMVIXL* slow_path =
new (codegen_->GetScopedAllocator()) LoadStringSlowPathARMVIXL(load);
codegen_->AddSlowPath(slow_path);
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index ddec0cc453..d6922d2f3f 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -220,13 +220,11 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
LoadClassSlowPathMIPS(HLoadClass* cls,
HInstruction* at,
uint32_t dex_pc,
- bool do_clinit,
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high = nullptr)
+ bool do_clinit)
: SlowPathCodeMIPS(at),
cls_(cls),
dex_pc_(dex_pc),
- do_clinit_(do_clinit),
- bss_info_high_(bss_info_high) {
+ do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
@@ -234,28 +232,11 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- const bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- Register entry_address = kNoRegister;
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out.AsRegister<Register>() : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -267,18 +248,6 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- // The class entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, bss_info_high_);
- __ Sw(calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678,
- &info_low->label);
- }
-
// Move the class to the desired location.
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
@@ -289,21 +258,6 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && !baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the class entry.
- const bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
- const bool has_irreducible_loops = codegen->GetGraph()->HasIrreducibleLoops();
- Register base =
- (isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
- CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
- mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, info_high);
- mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base);
- __ Sw(out.AsRegister<Register>(), TMP, /* placeholder */ 0x5678, &info_low->label);
- }
__ B(GetExitLabel());
}
@@ -319,92 +273,41 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS {
// Whether to initialize the class.
const bool do_clinit_;
- // Pointer to the high half PC-relative patch info for HLoadClass/kBssEntry.
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS);
};
class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
public:
- explicit LoadStringSlowPathMIPS(HLoadString* instruction,
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high)
- : SlowPathCodeMIPS(instruction), bss_info_high_(bss_info_high) {}
+ explicit LoadStringSlowPathMIPS(HLoadString* instruction)
+ : SlowPathCodeMIPS(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
DCHECK(instruction_->IsLoadString());
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- HLoadString* load = instruction_->AsLoadString();
- const dex::StringIndex string_index = load->GetStringIndex();
- Register out = locations->Out().AsRegister<Register>();
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadString/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- Register entry_address = kNoRegister;
- if (baker_or_no_read_barriers) {
- Register temp = locations->GetTemp(0).AsRegister<Register>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
__ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
mips_codegen->InvokeRuntime(kQuickResolveString, instruction_, instruction_->GetDexPc(), this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- // Store the resolved string to the BSS entry.
- if (baker_or_no_read_barriers) {
- // The string entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index, bss_info_high_);
- __ Sw(calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678,
- &info_low->label);
- }
-
DataType::Type type = instruction_->GetType();
mips_codegen->MoveLocation(locations->Out(),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
type);
RestoreLiveRegisters(codegen, locations);
- // Store the resolved string to the BSS entry.
- if (!baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the string entry.
- const bool isR6 = mips_codegen->GetInstructionSetFeatures().IsR6();
- const bool has_irreducible_loops = codegen->GetGraph()->HasIrreducibleLoops();
- Register base =
- (isR6 || has_irreducible_loops) ? ZERO : locations->InAt(0).AsRegister<Register>();
- CodeGeneratorMIPS::PcRelativePatchInfo* info_high =
- mips_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index);
- CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
- mips_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index, info_high);
- mips_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, base);
- __ Sw(out, TMP, /* placeholder */ 0x5678, &info_low->label);
- }
__ B(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }
private:
- // Pointer to the high half PC-relative patch info.
- const CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
};
@@ -7736,8 +7639,6 @@ void LocationsBuilderMIPS::VisitLoadClass(HLoadClass* cls) {
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -7786,7 +7687,6 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high = nullptr;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass: {
DCHECK(!cls->CanCallRuntime());
@@ -7845,17 +7745,16 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
break;
}
case HLoadClass::LoadKind::kBssEntry: {
- bss_info_high = codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ CodeGeneratorMIPS::PcRelativePatchInfo* bss_info_high =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex(), bss_info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- Register temp = non_baker_read_barrier ? out : locations->GetTemp(0).AsRegister<Register>();
codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high,
- temp,
+ out,
base_or_current_method_reg);
GenerateGcRootFieldLoad(cls,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
read_barrier_option,
&info_low->label);
@@ -7887,7 +7786,7 @@ void InstructionCodeGeneratorMIPS::VisitLoadClass(HLoadClass* cls) NO_THREAD_SAF
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
SlowPathCodeMIPS* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Beqz(out, slow_path->GetEntryLabel());
@@ -7960,8 +7859,6 @@ void LocationsBuilderMIPS::VisitLoadString(HLoadString* load) {
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -8041,19 +7938,17 @@ void InstructionCodeGeneratorMIPS::VisitLoadString(HLoadString* load) NO_THREAD_
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
CodeGeneratorMIPS::PcRelativePatchInfo* info_low =
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- Register temp = non_baker_read_barrier ? out : locations->GetTemp(0).AsRegister<Register>();
codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high,
- temp,
+ out,
base_or_current_method_reg);
GenerateGcRootFieldLoad(load,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS* slow_path =
- new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS(load, info_high);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS(load);
codegen_->AddSlowPath(slow_path);
__ Beqz(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 0a6d9159d1..ee33b3f335 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -175,13 +175,11 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
LoadClassSlowPathMIPS64(HLoadClass* cls,
HInstruction* at,
uint32_t dex_pc,
- bool do_clinit,
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high = nullptr)
+ bool do_clinit)
: SlowPathCodeMIPS64(at),
cls_(cls),
dex_pc_(dex_pc),
- do_clinit_(do_clinit),
- bss_info_high_(bss_info_high) {
+ do_clinit_(do_clinit) {
DCHECK(at->IsLoadClass() || at->IsClinitCheck());
}
@@ -189,28 +187,11 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
LocationSummary* locations = instruction_->GetLocations();
Location out = locations->Out();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- const bool is_load_class_bss_entry =
- (cls_ == instruction_) && (cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry);
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- GpuRegister entry_address = kNoGpuRegister;
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- GpuRegister temp = locations->GetTemp(0).AsRegister<GpuRegister>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out.AsRegister<GpuRegister>() : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
dex::TypeIndex type_index = cls_->GetTypeIndex();
__ LoadConst32(calling_convention.GetRegisterAt(0), type_index.index_);
QuickEntrypointEnum entrypoint = do_clinit_ ? kQuickInitializeStaticStorage
@@ -222,19 +203,6 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
CheckEntrypointTypes<kQuickInitializeType, void*, uint32_t>();
}
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && baker_or_no_read_barriers) {
- // The class entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, bss_info_high_);
- __ Bind(&info_low->label);
- __ StoreToOffset(kStoreWord,
- calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678);
- }
-
// Move the class to the desired location.
if (out.IsValid()) {
DCHECK(out.IsRegister() && !locations->GetLiveRegisters()->ContainsCoreRegister(out.reg()));
@@ -245,17 +213,6 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved class to the BSS entry.
- if (is_load_class_bss_entry && !baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the class entry.
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
- mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewTypeBssEntryPatch(cls_->GetDexFile(), type_index, info_high);
- mips64_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, info_low);
- __ StoreToOffset(kStoreWord, out.AsRegister<GpuRegister>(), TMP, /* placeholder */ 0x5678);
- }
__ Bc(GetExitLabel());
}
@@ -271,46 +228,25 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
// Whether to initialize the class.
const bool do_clinit_;
- // Pointer to the high half PC-relative patch info for HLoadClass/kBssEntry.
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathMIPS64);
};
class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
public:
- explicit LoadStringSlowPathMIPS64(HLoadString* instruction,
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high)
- : SlowPathCodeMIPS64(instruction), bss_info_high_(bss_info_high) {}
+ explicit LoadStringSlowPathMIPS64(HLoadString* instruction)
+ : SlowPathCodeMIPS64(instruction) {}
void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
DCHECK(instruction_->IsLoadString());
DCHECK_EQ(instruction_->AsLoadString()->GetLoadKind(), HLoadString::LoadKind::kBssEntry);
LocationSummary* locations = instruction_->GetLocations();
DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
- HLoadString* load = instruction_->AsLoadString();
- const dex::StringIndex string_index = load->GetStringIndex();
- GpuRegister out = locations->Out().AsRegister<GpuRegister>();
+ const dex::StringIndex string_index = instruction_->AsLoadString()->GetStringIndex();
CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
- const bool baker_or_no_read_barriers = (!kUseReadBarrier || kUseBakerReadBarrier);
InvokeRuntimeCallingConvention calling_convention;
__ Bind(GetEntryLabel());
SaveLiveRegisters(codegen, locations);
- // For HLoadString/kBssEntry/kSaveEverything, make sure we preserve the address of the entry.
- GpuRegister entry_address = kNoGpuRegister;
- if (baker_or_no_read_barriers) {
- GpuRegister temp = locations->GetTemp(0).AsRegister<GpuRegister>();
- bool temp_is_a0 = (temp == calling_convention.GetRegisterAt(0));
- // In the unlucky case that `temp` is A0, we preserve the address in `out` across the
- // kSaveEverything call.
- entry_address = temp_is_a0 ? out : temp;
- DCHECK_NE(entry_address, calling_convention.GetRegisterAt(0));
- if (temp_is_a0) {
- __ Move(entry_address, temp);
- }
- }
-
__ LoadConst32(calling_convention.GetRegisterAt(0), string_index.index_);
mips64_codegen->InvokeRuntime(kQuickResolveString,
instruction_,
@@ -318,47 +254,18 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
this);
CheckEntrypointTypes<kQuickResolveString, void*, uint32_t>();
- // Store the resolved string to the BSS entry.
- if (baker_or_no_read_barriers) {
- // The string entry address was preserved in `entry_address` thanks to kSaveEverything.
- DCHECK(bss_info_high_);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewStringBssEntryPatch(load->GetDexFile(),
- string_index,
- bss_info_high_);
- __ Bind(&info_low->label);
- __ StoreToOffset(kStoreWord,
- calling_convention.GetRegisterAt(0),
- entry_address,
- /* placeholder */ 0x5678);
- }
-
DataType::Type type = instruction_->GetType();
mips64_codegen->MoveLocation(locations->Out(),
Location::RegisterLocation(calling_convention.GetRegisterAt(0)),
type);
RestoreLiveRegisters(codegen, locations);
- // Store the resolved string to the BSS entry.
- if (!baker_or_no_read_barriers) {
- // For non-Baker read barriers we need to re-calculate the address of
- // the string entry.
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_high =
- mips64_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index);
- CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
- mips64_codegen->NewStringBssEntryPatch(load->GetDexFile(), string_index, info_high);
- mips64_codegen->EmitPcRelativeAddressPlaceholderHigh(info_high, TMP, info_low);
- __ StoreToOffset(kStoreWord, out, TMP, /* placeholder */ 0x5678);
- }
__ Bc(GetExitLabel());
}
const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }
private:
- // Pointer to the high half PC-relative patch info.
- const CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high_;
-
DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
};
@@ -5979,8 +5886,6 @@ void LocationsBuilderMIPS64::VisitLoadClass(HLoadClass* cls) {
if (load_kind == HLoadClass::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the type resolution or initialization and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -6014,7 +5919,6 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
? kWithoutReadBarrier
: kCompilerReadBarrierOption;
bool generate_null_check = false;
- CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high = nullptr;
switch (load_kind) {
case HLoadClass::LoadKind::kReferrersClass:
DCHECK(!cls->CanCallRuntime());
@@ -6064,17 +5968,14 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
break;
}
case HLoadClass::LoadKind::kBssEntry: {
- bss_info_high = codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
+ CodeGeneratorMIPS64::PcRelativePatchInfo* bss_info_high =
+ codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex());
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewTypeBssEntryPatch(cls->GetDexFile(), cls->GetTypeIndex(), bss_info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- GpuRegister temp = non_baker_read_barrier
- ? out
- : locations->GetTemp(0).AsRegister<GpuRegister>();
- codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high, temp);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(bss_info_high, out);
GenerateGcRootFieldLoad(cls,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
read_barrier_option,
&info_low->label);
@@ -6098,7 +5999,7 @@ void InstructionCodeGeneratorMIPS64::VisitLoadClass(HLoadClass* cls) NO_THREAD_S
if (generate_null_check || cls->MustGenerateClinitCheck()) {
DCHECK(cls->CanCallRuntime());
SlowPathCodeMIPS64* slow_path = new (codegen_->GetScopedAllocator()) LoadClassSlowPathMIPS64(
- cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck(), bss_info_high);
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
codegen_->AddSlowPath(slow_path);
if (generate_null_check) {
__ Beqzc(out, slow_path->GetEntryLabel());
@@ -6146,8 +6047,6 @@ void LocationsBuilderMIPS64::VisitLoadString(HLoadString* load) {
if (load_kind == HLoadString::LoadKind::kBssEntry) {
if (!kUseReadBarrier || kUseBakerReadBarrier) {
// Rely on the pResolveString and marking to save everything we need.
- // Request a temp to hold the BSS entry location for the slow path.
- locations->AddTemp(Location::RequiresRegister());
RegisterSet caller_saves = RegisterSet::Empty();
InvokeRuntimeCallingConvention calling_convention;
caller_saves.Add(Location::RegisterLocation(calling_convention.GetRegisterAt(0)));
@@ -6203,19 +6102,15 @@ void InstructionCodeGeneratorMIPS64::VisitLoadString(HLoadString* load) NO_THREA
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex());
CodeGeneratorMIPS64::PcRelativePatchInfo* info_low =
codegen_->NewStringBssEntryPatch(load->GetDexFile(), load->GetStringIndex(), info_high);
- constexpr bool non_baker_read_barrier = kUseReadBarrier && !kUseBakerReadBarrier;
- GpuRegister temp = non_baker_read_barrier
- ? out
- : locations->GetTemp(0).AsRegister<GpuRegister>();
- codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, temp);
+ codegen_->EmitPcRelativeAddressPlaceholderHigh(info_high, out);
GenerateGcRootFieldLoad(load,
out_loc,
- temp,
+ out,
/* placeholder */ 0x5678,
kCompilerReadBarrierOption,
&info_low->label);
SlowPathCodeMIPS64* slow_path =
- new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS64(load, info_high);
+ new (codegen_->GetScopedAllocator()) LoadStringSlowPathMIPS64(load);
codegen_->AddSlowPath(slow_path);
__ Beqzc(out, slow_path->GetEntryLabel());
__ Bind(slow_path->GetExitLabel());
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index ad0e71aaf4..2e8170ecc4 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -240,13 +240,6 @@ class LoadStringSlowPathX86 : public SlowPathCode {
x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
RestoreLiveRegisters(codegen, locations);
- // Store the resolved String to the BSS entry.
- Register method_address = locations->InAt(0).AsRegister<Register>();
- __ movl(Address(method_address, CodeGeneratorX86::kDummy32BitOffset),
- locations->Out().AsRegister<Register>());
- Label* fixup_label = x86_codegen->NewStringBssEntryPatch(instruction_->AsLoadString());
- __ Bind(fixup_label);
-
__ jmp(GetExitLabel());
}
@@ -293,16 +286,6 @@ class LoadClassSlowPathX86 : public SlowPathCode {
x86_codegen->Move32(out, Location::RegisterLocation(EAX));
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
- DCHECK(out.IsValid());
- Register method_address = locations->InAt(0).AsRegister<Register>();
- __ movl(Address(method_address, CodeGeneratorX86::kDummy32BitOffset),
- locations->Out().AsRegister<Register>());
- Label* fixup_label = x86_codegen->NewTypeBssEntryPatch(cls_);
- __ Bind(fixup_label);
- }
__ jmp(GetExitLabel());
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index d64a49704e..e25688c9a3 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -273,15 +273,6 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
}
RestoreLiveRegisters(codegen, locations);
- // For HLoadClass/kBssEntry, store the resolved Class to the BSS entry.
- DCHECK_EQ(instruction_->IsLoadClass(), cls_ == instruction_);
- if (cls_ == instruction_ && cls_->GetLoadKind() == HLoadClass::LoadKind::kBssEntry) {
- DCHECK(out.IsValid());
- __ movl(Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false),
- locations->Out().AsRegister<CpuRegister>());
- Label* fixup_label = x86_64_codegen->NewTypeBssEntryPatch(cls_);
- __ Bind(fixup_label);
- }
__ jmp(GetExitLabel());
}
@@ -323,12 +314,6 @@ class LoadStringSlowPathX86_64 : public SlowPathCode {
x86_64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
RestoreLiveRegisters(codegen, locations);
- // Store the resolved String to the BSS entry.
- __ movl(Address::Absolute(CodeGeneratorX86_64::kDummy32BitOffset, /* no_rip */ false),
- locations->Out().AsRegister<CpuRegister>());
- Label* fixup_label = x86_64_codegen->NewStringBssEntryPatch(instruction_->AsLoadString());
- __ Bind(fixup_label);
-
__ jmp(GetExitLabel());
}
diff --git a/compiler/optimizing/code_sinking.h b/compiler/optimizing/code_sinking.h
index 59cda52a8c..836d9d4f67 100644
--- a/compiler/optimizing/code_sinking.h
+++ b/compiler/optimizing/code_sinking.h
@@ -28,8 +28,10 @@ namespace art {
*/
class CodeSinking : public HOptimization {
public:
- CodeSinking(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, kCodeSinkingPassName, stats) {}
+ CodeSinking(HGraph* graph,
+ OptimizingCompilerStats* stats,
+ const char* name = kCodeSinkingPassName)
+ : HOptimization(graph, name, stats) {}
void Run() OVERRIDE;
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index bb586bf096..6f11e628ee 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -113,7 +113,7 @@ void HConstantFoldingVisitor::VisitBinaryOperation(HBinaryOperation* inst) {
void HConstantFoldingVisitor::VisitTypeConversion(HTypeConversion* inst) {
// Constant folding: replace `TypeConversion(a)' with a constant at
// compile time if `a' is a constant.
- HConstant* constant = inst->AsTypeConversion()->TryStaticEvaluation();
+ HConstant* constant = inst->TryStaticEvaluation();
if (constant != nullptr) {
inst->ReplaceWith(constant);
inst->GetBlock()->RemoveInstruction(inst);
diff --git a/compiler/optimizing/constructor_fence_redundancy_elimination.h b/compiler/optimizing/constructor_fence_redundancy_elimination.h
index d89210cd1c..f4b06d5544 100644
--- a/compiler/optimizing/constructor_fence_redundancy_elimination.h
+++ b/compiler/optimizing/constructor_fence_redundancy_elimination.h
@@ -48,12 +48,13 @@ namespace art {
class ConstructorFenceRedundancyElimination : public HOptimization {
public:
ConstructorFenceRedundancyElimination(HGraph* graph,
- OptimizingCompilerStats* stats)
- : HOptimization(graph, kPassName, stats) {}
+ OptimizingCompilerStats* stats,
+ const char* name = kCFREPassName)
+ : HOptimization(graph, name, stats) {}
void Run() OVERRIDE;
- static constexpr const char* kPassName = "constructor_fence_redundancy_elimination";
+ static constexpr const char* kCFREPassName = "constructor_fence_redundancy_elimination";
private:
DISALLOW_COPY_AND_ASSIGN(ConstructorFenceRedundancyElimination);
diff --git a/compiler/optimizing/data_type.h b/compiler/optimizing/data_type.h
index 75a7fbe6ca..d253036479 100644
--- a/compiler/optimizing/data_type.h
+++ b/compiler/optimizing/data_type.h
@@ -186,6 +186,7 @@ class DataType {
}
static bool IsTypeConversionImplicit(Type input_type, Type result_type);
+ static bool IsTypeConversionImplicit(int64_t value, Type result_type);
static const char* PrettyDescriptor(Type type);
@@ -213,6 +214,18 @@ inline bool DataType::IsTypeConversionImplicit(Type input_type, Type result_type
MaxValueOfIntegralType(input_type) <= MaxValueOfIntegralType(result_type));
}
+inline bool DataType::IsTypeConversionImplicit(int64_t value, Type result_type) {
+ if (IsIntegralType(result_type) && result_type != Type::kInt64) {
+ // If the constant value falls in the range of the result_type, type
+ // conversion isn't needed.
+ return value >= MinValueOfIntegralType(result_type) &&
+ value <= MaxValueOfIntegralType(result_type);
+ }
+ // Conversion isn't implicit if it's into a non-integral type, or into a
+ // 64-bit int, which may use a different number of registers.
+ return false;
+}
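+// Example (illustrative only, not part of this change): a conversion of a
+// constant is implicit exactly when the value fits the integral result type:
+//   IsTypeConversionImplicit(100, Type::kInt8);   // true: fits [-128, 127]
+//   IsTypeConversionImplicit(300, Type::kInt8);   // false: out of range
+//   IsTypeConversionImplicit(1, Type::kInt64);    // false: 64-bit excluded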
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_DATA_TYPE_H_
diff --git a/compiler/optimizing/induction_var_analysis.cc b/compiler/optimizing/induction_var_analysis.cc
index e2747afd85..d270c6a28e 100644
--- a/compiler/optimizing/induction_var_analysis.cc
+++ b/compiler/optimizing/induction_var_analysis.cc
@@ -93,12 +93,142 @@ static DataType::Type ImplicitConversion(DataType::Type type) {
}
}
+/**
+ * Returns true if loop is guarded by "a cmp b" on entry.
+ */
+static bool IsGuardedBy(HLoopInformation* loop,
+ IfCondition cmp,
+ HInstruction* a,
+ HInstruction* b) {
+ // Chase back through straightline code to the first potential
+ // block that has a control dependence.
+ // guard: if (x) bypass
+ // |
+ // entry: straightline code
+ // |
+ // preheader
+ // |
+ // header
+ HBasicBlock* guard = loop->GetPreHeader();
+ HBasicBlock* entry = loop->GetHeader();
+ while (guard->GetPredecessors().size() == 1 &&
+ guard->GetSuccessors().size() == 1) {
+ entry = guard;
+ guard = guard->GetSinglePredecessor();
+ }
+ // Find guard.
+ HInstruction* control = guard->GetLastInstruction();
+ if (!control->IsIf()) {
+ return false;
+ }
+ HIf* ifs = control->AsIf();
+ HInstruction* if_expr = ifs->InputAt(0);
+ if (if_expr->IsCondition()) {
+ IfCondition other_cmp = ifs->IfTrueSuccessor() == entry
+ ? if_expr->AsCondition()->GetCondition()
+ : if_expr->AsCondition()->GetOppositeCondition();
+ if (if_expr->InputAt(0) == a && if_expr->InputAt(1) == b) {
+ return cmp == other_cmp;
+ } else if (if_expr->InputAt(1) == a && if_expr->InputAt(0) == b) {
+ switch (cmp) {
+ case kCondLT: return other_cmp == kCondGT;
+ case kCondLE: return other_cmp == kCondGE;
+ case kCondGT: return other_cmp == kCondLT;
+ case kCondGE: return other_cmp == kCondLE;
+ default: LOG(FATAL) << "unexpected cmp: " << cmp;
+ }
+ }
+ }
+ return false;
+}
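+// Illustrative example (an assumed Java-level input, not part of this change):
+// for a loop shaped as
+//
+//   if (i < n) {                    // guard
+//     ...                           // straightline entry code
+//     do { ... } while (i < n);     // loop
+//   }
+//
+// IsGuardedBy(loop, kCondLT, i, n) returns true: chasing back from the
+// preheader through single-predecessor/single-successor blocks reaches the
+// guard, whose condition (or its operand-swapped mirror, e.g. "n > i")
+// matches the requested comparison.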
+
+/* Finds the first loop-header phi use of the given instruction. */
+HInstruction* FindFirstLoopHeaderPhiUse(HLoopInformation* loop, HInstruction* instruction) {
+ for (const HUseListNode<HInstruction*>& use : instruction->GetUses()) {
+ if (use.GetUser()->GetBlock() == loop->GetHeader() &&
+ use.GetUser()->IsPhi() &&
+ use.GetUser()->InputAt(1) == instruction) {
+ return use.GetUser();
+ }
+ }
+ return nullptr;
+}
+
+/**
+ * Relinks the Phi structure after break-loop rewriting.
+ */
+bool FixOutsideUse(HLoopInformation* loop,
+ HInstruction* instruction,
+ HInstruction* replacement,
+ bool rewrite) {
+ // Deal with regular uses.
+ const HUseList<HInstruction*>& uses = instruction->GetUses();
+ for (auto it = uses.begin(), end = uses.end(); it != end; ) {
+ HInstruction* user = it->GetUser();
+ size_t index = it->GetIndex();
+ ++it; // increment prior to potential removal
+ if (user->GetBlock()->GetLoopInformation() != loop) {
+ if (replacement == nullptr) {
+ return false;
+ } else if (rewrite) {
+ user->ReplaceInput(replacement, index);
+ }
+ }
+ }
+ // Deal with environment uses.
+ const HUseList<HEnvironment*>& env_uses = instruction->GetEnvUses();
+ for (auto it = env_uses.begin(), end = env_uses.end(); it != end;) {
+ HEnvironment* user = it->GetUser();
+ size_t index = it->GetIndex();
+ ++it; // increment prior to potential removal
+ if (user->GetHolder()->GetBlock()->GetLoopInformation() != loop) {
+ if (replacement == nullptr) {
+ return false;
+ } else if (rewrite) {
+ user->RemoveAsUserOfInput(index);
+ user->SetRawEnvAt(index, replacement);
+ replacement->AddEnvUseAt(user, index);
+ }
+ }
+ }
+ return true;
+}
+
+/**
+ * Test and rewrite the loop body of a break-loop. Returns true on success.
+ */
+bool RewriteBreakLoopBody(HLoopInformation* loop,
+ HBasicBlock* body,
+ HInstruction* cond,
+ HInstruction* index,
+ HInstruction* upper,
+ bool rewrite) {
+ // Deal with Phis. Outside use prohibited, except for index (which gets exit value).
+ for (HInstructionIterator it(loop->GetHeader()->GetPhis()); !it.Done(); it.Advance()) {
+ HInstruction* exit_value = it.Current() == index ? upper : nullptr;
+ if (!FixOutsideUse(loop, it.Current(), exit_value, rewrite)) {
+ return false;
+ }
+ }
+ // Deal with other statements in header.
+ for (HInstruction* m = cond->GetPrevious(), *p = nullptr; m && !m->IsSuspendCheck(); m = p) {
+ p = m->GetPrevious();
+ if (rewrite) {
+ m->MoveBefore(body->GetFirstInstruction(), false);
+ }
+ if (!FixOutsideUse(loop, m, FindFirstLoopHeaderPhiUse(loop, m), rewrite)) {
+ return false;
+ }
+ }
+ return true;
+}
+
//
// Class methods.
//
-HInductionVarAnalysis::HInductionVarAnalysis(HGraph* graph)
- : HOptimization(graph, kInductionPassName),
+HInductionVarAnalysis::HInductionVarAnalysis(HGraph* graph, const char* name)
+ : HOptimization(graph, name),
global_depth_(0),
stack_(graph->GetAllocator()->Adapter(kArenaAllocInductionVarAnalysis)),
map_(std::less<HInstruction*>(),
@@ -754,6 +884,10 @@ HInductionVarAnalysis::InductionInfo* HInductionVarAnalysis::SolveConversion(
return nullptr;
}
+//
+// Loop trip count analysis methods.
+//
+
void HInductionVarAnalysis::VisitControl(HLoopInformation* loop) {
HInstruction* control = loop->GetHeader()->GetLastInstruction();
if (control->IsIf()) {
@@ -774,15 +908,16 @@ void HInductionVarAnalysis::VisitControl(HLoopInformation* loop) {
if (a == nullptr || b == nullptr) {
return; // Loop control is not a sequence.
} else if (if_true->GetLoopInformation() != loop && if_false->GetLoopInformation() == loop) {
- VisitCondition(loop, a, b, type, condition->GetOppositeCondition());
+ VisitCondition(loop, if_false, a, b, type, condition->GetOppositeCondition());
} else if (if_true->GetLoopInformation() == loop && if_false->GetLoopInformation() != loop) {
- VisitCondition(loop, a, b, type, condition->GetCondition());
+ VisitCondition(loop, if_true, a, b, type, condition->GetCondition());
}
}
}
}
void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop,
+ HBasicBlock* body,
InductionInfo* a,
InductionInfo* b,
DataType::Type type,
@@ -790,11 +925,11 @@ void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop,
if (a->induction_class == kInvariant && b->induction_class == kLinear) {
// Swap condition if induction is at right-hand-side (e.g. U > i is same as i < U).
switch (cmp) {
- case kCondLT: VisitCondition(loop, b, a, type, kCondGT); break;
- case kCondLE: VisitCondition(loop, b, a, type, kCondGE); break;
- case kCondGT: VisitCondition(loop, b, a, type, kCondLT); break;
- case kCondGE: VisitCondition(loop, b, a, type, kCondLE); break;
- case kCondNE: VisitCondition(loop, b, a, type, kCondNE); break;
+ case kCondLT: VisitCondition(loop, body, b, a, type, kCondGT); break;
+ case kCondLE: VisitCondition(loop, body, b, a, type, kCondGE); break;
+ case kCondGT: VisitCondition(loop, body, b, a, type, kCondLT); break;
+ case kCondGE: VisitCondition(loop, body, b, a, type, kCondLE); break;
+ case kCondNE: VisitCondition(loop, body, b, a, type, kCondNE); break;
default: break;
}
} else if (a->induction_class == kLinear && b->induction_class == kInvariant) {
@@ -802,24 +937,30 @@ void HInductionVarAnalysis::VisitCondition(HLoopInformation* loop,
InductionInfo* lower_expr = a->op_b;
InductionInfo* upper_expr = b;
InductionInfo* stride_expr = a->op_a;
- // Constant stride?
+ // Test for constant stride and integral condition.
int64_t stride_value = 0;
if (!IsExact(stride_expr, &stride_value)) {
- return;
+ return; // unknown stride
+ } else if (type != DataType::Type::kInt32 && type != DataType::Type::kInt64) {
+ return; // not integral
}
- // Rewrite condition i != U into strict end condition i < U or i > U if this end condition
- // is reached exactly (tested by verifying if the loop has a unit stride and the non-strict
- // condition would be always taken).
+ // Since loops with an i != U condition will not be normalized by the method below, first
+ // try to rewrite a break-loop with terminating condition i != U into an equivalent loop
+ // with non-strict end condition i <= U or i >= U if such a rewriting is possible and safe.
+ if (cmp == kCondNE && RewriteBreakLoop(loop, body, stride_value, type)) {
+ cmp = stride_value > 0 ? kCondLE : kCondGE;
+ }
+ // If this rewriting failed, try to rewrite condition i != U into strict end condition i < U
+ // or i > U if this end condition is reached exactly (tested by verifying if the loop has a
+ // unit stride and the non-strict condition would be always taken).
if (cmp == kCondNE && ((stride_value == +1 && IsTaken(lower_expr, upper_expr, kCondLE)) ||
(stride_value == -1 && IsTaken(lower_expr, upper_expr, kCondGE)))) {
cmp = stride_value > 0 ? kCondLT : kCondGT;
}
- // Only accept integral condition. A mismatch between the type of condition and the induction
- // is only allowed if the, necessarily narrower, induction range fits the narrower control.
- if (type != DataType::Type::kInt32 && type != DataType::Type::kInt64) {
- return; // not integral
- } else if (type != a->type &&
- !FitsNarrowerControl(lower_expr, upper_expr, stride_value, a->type, cmp)) {
+ // A mismatch between the type of the condition and the induction is only allowed
+ // if the (necessarily narrower) induction range fits the narrower control.
+ if (type != a->type &&
+ !FitsNarrowerControl(lower_expr, upper_expr, stride_value, a->type, cmp)) {
return; // mismatched type
}
// Normalize a linear loop control with a nonzero stride:
@@ -984,6 +1125,69 @@ bool HInductionVarAnalysis::FitsNarrowerControl(InductionInfo* lower_expr,
IsAtMost(upper_expr, &value) && value <= max;
}
+bool HInductionVarAnalysis::RewriteBreakLoop(HLoopInformation* loop,
+ HBasicBlock* body,
+ int64_t stride_value,
+ DataType::Type type) {
+ // Only accept unit stride.
+ if (std::abs(stride_value) != 1) {
+ return false;
+ }
+ // Simple terminating i != U condition, used nowhere else.
+ HIf* ifs = loop->GetHeader()->GetLastInstruction()->AsIf();
+ HInstruction* cond = ifs->InputAt(0);
+ if (ifs->GetPrevious() != cond || !cond->HasOnlyOneNonEnvironmentUse()) {
+ return false;
+ }
+ int c = LookupInfo(loop, cond->InputAt(0))->induction_class == kLinear ? 0 : 1;
+ HInstruction* index = cond->InputAt(c);
+ HInstruction* upper = cond->InputAt(1 - c);
+ // Safe to rewrite into i <= U?
+ IfCondition cmp = stride_value > 0 ? kCondLE : kCondGE;
+ if (!index->IsPhi() || !IsFinite(LookupInfo(loop, upper), stride_value, type, cmp)) {
+ return false;
+ }
+ // Body consists of update to index i only, used nowhere else.
+ if (body->GetSuccessors().size() != 1 ||
+ body->GetSingleSuccessor() != loop->GetHeader() ||
+ !body->GetPhis().IsEmpty() ||
+ body->GetInstructions().IsEmpty() ||
+ body->GetFirstInstruction() != index->InputAt(1) ||
+ !body->GetFirstInstruction()->HasOnlyOneNonEnvironmentUse() ||
+ !body->GetFirstInstruction()->GetNext()->IsGoto()) {
+ return false;
+ }
+ // Always taken or guarded by enclosing condition.
+ if (!IsTaken(LookupInfo(loop, index)->op_b, LookupInfo(loop, upper), cmp) &&
+ !IsGuardedBy(loop, cmp, index->InputAt(0), upper)) {
+ return false;
+ }
+ // Test if the break-loop body can be rewritten, and do so on success.
+ if (RewriteBreakLoopBody(loop, body, cond, index, upper, /*rewrite*/ false)) {
+ RewriteBreakLoopBody(loop, body, cond, index, upper, /*rewrite*/ true);
+ } else {
+ return false;
+ }
+ // Rewrite condition in HIR.
+ if (ifs->IfTrueSuccessor() != body) {
+ cmp = (cmp == kCondLE) ? kCondGT : kCondLT;
+ }
+ HInstruction* rep = nullptr;
+ switch (cmp) {
+ case kCondLT: rep = new (graph_->GetAllocator()) HLessThan(index, upper); break;
+ case kCondGT: rep = new (graph_->GetAllocator()) HGreaterThan(index, upper); break;
+ case kCondLE: rep = new (graph_->GetAllocator()) HLessThanOrEqual(index, upper); break;
+ case kCondGE: rep = new (graph_->GetAllocator()) HGreaterThanOrEqual(index, upper); break;
+ default: LOG(FATAL) << cmp; UNREACHABLE();
+ }
+ loop->GetHeader()->ReplaceAndRemoveInstructionWith(cond, rep);
+ return true;
+}
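+// Worked example (a sketch, assuming a typical break-loop input): the loop
+//
+//   int i = L;
+//   while (true) { s();  if (i == U) break;  i++; }
+//
+// tests "i != U" in the header, with s() executing once more than the
+// increment. Moving the header statements into the body and relaxing the
+// condition yields the analyzable form
+//
+//   for (int i = L; i <= U; i++) { s(); }   // i == U after the loop
+//
+// which runs s() the same U - L + 1 times, provided L <= U is always taken
+// or guarded, as checked above.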
+
+//
+// Helper methods.
+//
+
void HInductionVarAnalysis::AssignInfo(HLoopInformation* loop,
HInstruction* instruction,
InductionInfo* info) {
diff --git a/compiler/optimizing/induction_var_analysis.h b/compiler/optimizing/induction_var_analysis.h
index a2d302ae81..acad77d35f 100644
--- a/compiler/optimizing/induction_var_analysis.h
+++ b/compiler/optimizing/induction_var_analysis.h
@@ -35,7 +35,7 @@ namespace art {
*/
class HInductionVarAnalysis : public HOptimization {
public:
- explicit HInductionVarAnalysis(HGraph* graph);
+ explicit HInductionVarAnalysis(HGraph* graph, const char* name = kInductionPassName);
void Run() OVERRIDE;
@@ -195,9 +195,14 @@ class HInductionVarAnalysis : public HOptimization {
HInstruction* entry_phi,
HTypeConversion* conversion);
+ //
+ // Loop trip count analysis methods.
+ //
+
// Trip count information.
void VisitControl(HLoopInformation* loop);
void VisitCondition(HLoopInformation* loop,
+ HBasicBlock* body,
InductionInfo* a,
InductionInfo* b,
DataType::Type type,
@@ -219,6 +224,14 @@ class HInductionVarAnalysis : public HOptimization {
int64_t stride_value,
DataType::Type type,
IfCondition cmp);
+ bool RewriteBreakLoop(HLoopInformation* loop,
+ HBasicBlock* body,
+ int64_t stride_value,
+ DataType::Type type);
+
+ //
+ // Helper methods.
+ //
// Assign and lookup.
void AssignInfo(HLoopInformation* loop, HInstruction* instruction, InductionInfo* info);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index 3f4a3d8b8e..2444e43d64 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -441,9 +441,9 @@ bool HInliner::TryInline(HInvoke* invoke_instruction) {
// Add dependency due to devirtualization. We've assumed resolved_method
// has a single implementation.
outermost_graph_->AddCHASingleImplementationDependency(resolved_method);
- MaybeRecordStat(stats_, kCHAInline);
+ MaybeRecordStat(stats_, MethodCompilationStat::kCHAInline);
} else {
- MaybeRecordStat(stats_, kInlinedInvokeVirtualOrInterface);
+ MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvokeVirtualOrInterface);
}
}
return result;
@@ -533,7 +533,7 @@ bool HInliner::TryInlineFromInlineCache(const DexFile& caller_dex_file,
}
case kInlineCacheMonomorphic: {
- MaybeRecordStat(stats_, kMonomorphicCall);
+ MaybeRecordStat(stats_, MethodCompilationStat::kMonomorphicCall);
if (UseOnlyPolymorphicInliningWithNoDeopt()) {
return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
} else {
@@ -542,7 +542,7 @@ bool HInliner::TryInlineFromInlineCache(const DexFile& caller_dex_file,
}
case kInlineCachePolymorphic: {
- MaybeRecordStat(stats_, kPolymorphicCall);
+ MaybeRecordStat(stats_, MethodCompilationStat::kPolymorphicCall);
return TryInlinePolymorphicCall(invoke_instruction, resolved_method, inline_cache);
}
@@ -551,7 +551,7 @@ bool HInliner::TryInlineFromInlineCache(const DexFile& caller_dex_file,
<< "Interface or virtual call to "
<< caller_dex_file.PrettyMethod(invoke_instruction->GetDexMethodIndex())
<< " is megamorphic and not inlined";
- MaybeRecordStat(stats_, kMegamorphicCall);
+ MaybeRecordStat(stats_, MethodCompilationStat::kMegamorphicCall);
return false;
}
@@ -755,7 +755,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
dex::TypeIndex class_index = FindClassIndexIn(
GetMonomorphicType(classes), caller_compilation_unit_);
if (!class_index.IsValid()) {
- LOG_FAIL(stats_, kNotInlinedDexCache)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedDexCache)
<< "Call to " << ArtMethod::PrettyMethod(resolved_method)
<< " from inline cache is not inlined because its class is not"
<< " accessible to the caller";
@@ -804,7 +804,7 @@ bool HInliner::TryInlineMonomorphicCall(HInvoke* invoke_instruction,
/* is_first_run */ false);
rtp_fixup.Run();
- MaybeRecordStat(stats_, kInlinedMonomorphicCall);
+ MaybeRecordStat(stats_, MethodCompilationStat::kInlinedMonomorphicCall);
return true;
}
@@ -994,7 +994,7 @@ bool HInliner::TryInlinePolymorphicCall(HInvoke* invoke_instruction,
return false;
}
- MaybeRecordStat(stats_, kInlinedPolymorphicCall);
+ MaybeRecordStat(stats_, MethodCompilationStat::kInlinedPolymorphicCall);
// Run type propagation to get the guards typed.
ReferenceTypePropagation rtp_fixup(graph_,
@@ -1200,7 +1200,7 @@ bool HInliner::TryInlinePolymorphicCallToSameTarget(
/* is_first_run */ false);
rtp_fixup.Run();
- MaybeRecordStat(stats_, kInlinedPolymorphicCall);
+ MaybeRecordStat(stats_, MethodCompilationStat::kInlinedPolymorphicCall);
LOG_SUCCESS() << "Inlined same polymorphic target " << actual_method->PrettyMethod();
return true;
@@ -1258,6 +1258,13 @@ bool HInliner::TryInlineAndReplace(HInvoke* invoke_instruction,
new_invoke->SetReferenceTypeInfo(invoke_instruction->GetReferenceTypeInfo());
}
return_replacement = new_invoke;
+ // Directly check if the new virtual can be recognized as an intrinsic.
+ // This way, we avoid running a full recognition pass just to detect
+ // these relatively rare cases.
+ bool wrong_invoke_type = false;
+ if (IntrinsicsRecognizer::Recognize(new_invoke, &wrong_invoke_type)) {
+ MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
+ }
} else {
// TODO: Consider sharpening an invoke virtual once it is not dependent on the
// compiler driver.
@@ -1301,14 +1308,14 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
ReferenceTypeInfo receiver_type,
HInstruction** return_replacement) {
if (method->IsProxyMethod()) {
- LOG_FAIL(stats_, kNotInlinedProxy)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedProxy)
<< "Method " << method->PrettyMethod()
<< " is not inlined because of unimplemented inline support for proxy methods.";
return false;
}
if (CountRecursiveCallsOf(method) > kMaximumNumberOfRecursiveCalls) {
- LOG_FAIL(stats_, kNotInlinedRecursiveBudget)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedRecursiveBudget)
<< "Method "
<< method->PrettyMethod()
<< " is not inlined because it has reached its recursive call budget.";
@@ -1322,10 +1329,10 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
if (TryPatternSubstitution(invoke_instruction, method, return_replacement)) {
LOG_SUCCESS() << "Successfully replaced pattern of invoke "
<< method->PrettyMethod();
- MaybeRecordStat(stats_, kReplacedInvokeWithSimplePattern);
+ MaybeRecordStat(stats_, MethodCompilationStat::kReplacedInvokeWithSimplePattern);
return true;
}
- LOG_FAIL(stats_, kNotInlinedWont)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedWont)
<< "Won't inline " << method->PrettyMethod() << " in "
<< outer_compilation_unit_.GetDexFile()->GetLocation() << " ("
<< caller_compilation_unit_.GetDexFile()->GetLocation() << ") from "
@@ -1345,7 +1352,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
size_t inline_max_code_units = compiler_driver_->GetCompilerOptions().GetInlineMaxCodeUnits();
if (code_item->insns_size_in_code_units_ > inline_max_code_units) {
- LOG_FAIL(stats_, kNotInlinedCodeItem)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCodeItem)
<< "Method " << method->PrettyMethod()
<< " is not inlined because its code item is too big: "
<< code_item->insns_size_in_code_units_
@@ -1355,13 +1362,13 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
}
if (code_item->tries_size_ != 0) {
- LOG_FAIL(stats_, kNotInlinedTryCatch)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedTryCatch)
<< "Method " << method->PrettyMethod() << " is not inlined because of try block";
return false;
}
if (!method->IsCompilable()) {
- LOG_FAIL(stats_, kNotInlinedNotVerified)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedNotVerified)
<< "Method " << method->PrettyMethod()
<< " has soft failures un-handled by the compiler, so it cannot be inlined";
}
@@ -1371,7 +1378,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
if (Runtime::Current()->UseJitCompilation() ||
!compiler_driver_->IsMethodVerifiedWithoutFailures(
method->GetDexMethodIndex(), class_def_idx, *method->GetDexFile())) {
- LOG_FAIL(stats_, kNotInlinedNotVerified)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedNotVerified)
<< "Method " << method->PrettyMethod()
<< " couldn't be verified, so it cannot be inlined";
return false;
@@ -1382,9 +1389,10 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
invoke_instruction->AsInvokeStaticOrDirect()->IsStaticWithImplicitClinitCheck()) {
// Case of a static method that cannot be inlined because it implicitly
// requires an initialization check of its declaring class.
- LOG_FAIL(stats_, kNotInlinedDexCache) << "Method " << method->PrettyMethod()
- << " is not inlined because it is static and requires a clinit"
- << " check that cannot be emitted due to Dex cache limitations";
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedDexCache)
+ << "Method " << method->PrettyMethod()
+ << " is not inlined because it is static and requires a clinit"
+ << " check that cannot be emitted due to Dex cache limitations";
return false;
}
@@ -1394,7 +1402,7 @@ bool HInliner::TryBuildAndInline(HInvoke* invoke_instruction,
}
LOG_SUCCESS() << method->PrettyMethod();
- MaybeRecordStat(stats_, kInlinedInvoke);
+ MaybeRecordStat(stats_, MethodCompilationStat::kInlinedInvoke);
return true;
}
@@ -1677,7 +1685,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
handles_);
if (builder.BuildGraph() != kAnalysisSuccess) {
- LOG_FAIL(stats_, kNotInlinedCannotBuild)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedCannotBuild)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be built, so cannot be inlined";
return false;
@@ -1685,7 +1693,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
if (!RegisterAllocator::CanAllocateRegistersFor(*callee_graph,
compiler_driver_->GetInstructionSet())) {
- LOG_FAIL(stats_, kNotInlinedRegisterAllocator)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedRegisterAllocator)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " cannot be inlined because of the register allocator";
return false;
@@ -1738,7 +1746,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
HBasicBlock* exit_block = callee_graph->GetExitBlock();
if (exit_block == nullptr) {
- LOG_FAIL(stats_, kNotInlinedInfiniteLoop)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedInfiniteLoop)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because it has an infinite loop";
return false;
@@ -1749,14 +1757,14 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
if (predecessor->GetLastInstruction()->IsThrow()) {
if (invoke_instruction->GetBlock()->IsTryBlock()) {
// TODO(ngeoffray): Support adding HTryBoundary in Hgraph::InlineInto.
- LOG_FAIL(stats_, kNotInlinedTryCatch)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedTryCatch)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because one branch always throws and"
<< " caller is in a try/catch block";
return false;
} else if (graph_->GetExitBlock() == nullptr) {
// TODO(ngeoffray): Support adding HExit in the caller graph.
- LOG_FAIL(stats_, kNotInlinedInfiniteLoop)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedInfiniteLoop)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because one branch always throws and"
<< " caller does not have an exit block";
@@ -1775,7 +1783,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
}
if (!has_one_return) {
- LOG_FAIL(stats_, kNotInlinedAlwaysThrows)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedAlwaysThrows)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because it always throws";
return false;
@@ -1788,7 +1796,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
if (block->GetLoopInformation()->IsIrreducible()) {
// Don't inline methods with irreducible loops, they could prevent some
// optimizations to run.
- LOG_FAIL(stats_, kNotInlinedIrreducibleLoop)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedIrreducibleLoop)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because it contains an irreducible loop";
return false;
@@ -1797,7 +1805,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
// Don't inline methods with loops without exit, since they cause the
// loop information to be computed incorrectly when updating after
// inlining.
- LOG_FAIL(stats_, kNotInlinedLoopWithoutExit)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedLoopWithoutExit)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because it contains a loop with no exit";
return false;
@@ -1808,7 +1816,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
!instr_it.Done();
instr_it.Advance()) {
if (++number_of_instructions >= inlining_budget_) {
- LOG_FAIL(stats_, kNotInlinedInstructionBudget)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedInstructionBudget)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " is not inlined because the outer method has reached"
<< " its instruction budget limit.";
@@ -1817,7 +1825,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
HInstruction* current = instr_it.Current();
if (current->NeedsEnvironment() &&
(total_number_of_dex_registers_ >= kMaximumNumberOfCumulatedDexRegisters)) {
- LOG_FAIL(stats_, kNotInlinedEnvironmentBudget)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedEnvironmentBudget)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " is not inlined because its caller has reached"
<< " its environment budget limit.";
@@ -1827,7 +1835,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
if (current->NeedsEnvironment() &&
!CanEncodeInlinedMethodInStackMap(*caller_compilation_unit_.GetDexFile(),
resolved_method)) {
- LOG_FAIL(stats_, kNotInlinedStackMaps)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedStackMaps)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because " << current->DebugName()
<< " needs an environment, is in a different dex file"
@@ -1836,7 +1844,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
}
if (!same_dex_file && current->NeedsDexCacheOfDeclaringClass()) {
- LOG_FAIL(stats_, kNotInlinedDexCache)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedDexCache)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because " << current->DebugName()
<< " it is in a different dex file and requires access to the dex cache";
@@ -1848,7 +1856,7 @@ bool HInliner::TryBuildAndInlineHelper(HInvoke* invoke_instruction,
current->IsUnresolvedStaticFieldSet() ||
current->IsUnresolvedInstanceFieldSet()) {
// Entrypoint for unresolved fields does not handle inlined frames.
- LOG_FAIL(stats_, kNotInlinedUnresolvedEntrypoint)
+ LOG_FAIL(stats_, MethodCompilationStat::kNotInlinedUnresolvedEntrypoint)
<< "Method " << callee_dex_file.PrettyMethod(method_index)
<< " could not be inlined because it is using an unresolved"
<< " entrypoint";
diff --git a/compiler/optimizing/inliner.h b/compiler/optimizing/inliner.h
index c4b3a32d91..042eee3204 100644
--- a/compiler/optimizing/inliner.h
+++ b/compiler/optimizing/inliner.h
@@ -44,8 +44,9 @@ class HInliner : public HOptimization {
size_t total_number_of_dex_registers,
size_t total_number_of_instructions,
HInliner* parent,
- size_t depth = 0)
- : HOptimization(outer_graph, kInlinerPassName, stats),
+ size_t depth = 0,
+ const char* name = kInlinerPassName)
+ : HOptimization(outer_graph, name, stats),
outermost_graph_(outermost_graph),
outer_compilation_unit_(outer_compilation_unit),
caller_compilation_unit_(caller_compilation_unit),
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 61840cc20f..978d0c2225 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -29,6 +29,7 @@
#include "driver/compiler_options.h"
#include "imtable-inl.h"
#include "mirror/dex_cache.h"
+#include "oat_file.h"
#include "optimizing_compiler_stats.h"
#include "quicken_info.h"
#include "scoped_thread_state_change-inl.h"
@@ -447,7 +448,8 @@ ArenaBitVector* HInstructionBuilder::FindNativeDebugInfoLocations() {
/* expandable */ false,
kArenaAllocGraphBuilder);
locations->ClearAllBits();
- dex_file_->DecodeDebugPositionInfo(code_item_, Callback::Position, locations);
+ uint32_t debug_info_offset = OatFile::GetDebugInfoOffset(*dex_file_, code_item_);
+ dex_file_->DecodeDebugPositionInfo(code_item_, debug_info_offset, Callback::Position, locations);
// Instruction-specific tweaks.
IterationRange<DexInstructionIterator> instructions = code_item_->Instructions();
for (const DexInstructionPcPair& inst : instructions) {
diff --git a/compiler/optimizing/instruction_simplifier.cc b/compiler/optimizing/instruction_simplifier.cc
index fbfee12be9..7fa0c2be3d 100644
--- a/compiler/optimizing/instruction_simplifier.cc
+++ b/compiler/optimizing/instruction_simplifier.cc
@@ -27,6 +27,10 @@
namespace art {
+// Whether to run an exhaustive test of individual HInstruction cloning, in which each
+// clonable instruction is replaced with its copy.
+static constexpr bool kTestInstructionClonerExhaustively = false;
+
class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
public:
InstructionSimplifierVisitor(HGraph* graph,
@@ -44,7 +48,7 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
void RecordSimplification() {
simplification_occurred_ = true;
simplifications_at_current_position_++;
- MaybeRecordStat(stats_, kInstructionSimplifications);
+ MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplifications);
}
bool ReplaceRotateWithRor(HBinaryOperation* op, HUShr* ushr, HShl* shl);
@@ -130,6 +134,11 @@ class InstructionSimplifierVisitor : public HGraphDelegateVisitor {
};
void InstructionSimplifier::Run() {
+ if (kTestInstructionClonerExhaustively) {
+ CloneAndReplaceInstructionVisitor visitor(graph_);
+ visitor.VisitReversePostOrder();
+ }
+
InstructionSimplifierVisitor visitor(graph_, codegen_, compiler_driver_, stats_);
visitor.Run();
}
@@ -654,7 +663,7 @@ void InstructionSimplifierVisitor::VisitInstanceOf(HInstanceOf* instruction) {
HGraph* graph = GetGraph();
if (object->IsNullConstant()) {
- MaybeRecordStat(stats_, kRemovedInstanceOf);
+ MaybeRecordStat(stats_, MethodCompilationStat::kRemovedInstanceOf);
instruction->ReplaceWith(graph->GetIntConstant(0));
instruction->GetBlock()->RemoveInstruction(instruction);
RecordSimplification();
@@ -665,7 +674,7 @@ void InstructionSimplifierVisitor::VisitInstanceOf(HInstanceOf* instruction) {
// the return value check with the `outcome` check, b/27651442 .
bool outcome = false;
if (TypeCheckHasKnownOutcome(load_class, object, &outcome)) {
- MaybeRecordStat(stats_, kRemovedInstanceOf);
+ MaybeRecordStat(stats_, MethodCompilationStat::kRemovedInstanceOf);
if (outcome && can_be_null) {
// Type test will succeed, we just need a null test.
HNotEqual* test = new (graph->GetAllocator()) HNotEqual(graph->GetNullConstant(), object);
diff --git a/compiler/optimizing/instruction_simplifier_arm.cc b/compiler/optimizing/instruction_simplifier_arm.cc
index d41e49a0f3..92081e30b1 100644
--- a/compiler/optimizing/instruction_simplifier_arm.cc
+++ b/compiler/optimizing/instruction_simplifier_arm.cc
@@ -37,9 +37,7 @@ class InstructionSimplifierArmVisitor : public HGraphVisitor {
private:
void RecordSimplification() {
- if (stats_ != nullptr) {
- stats_->RecordStat(kInstructionSimplificationsArch);
- }
+ MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplificationsArch);
}
bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
diff --git a/compiler/optimizing/instruction_simplifier_arm64.cc b/compiler/optimizing/instruction_simplifier_arm64.cc
index 69e1463ac4..1c44e5ac49 100644
--- a/compiler/optimizing/instruction_simplifier_arm64.cc
+++ b/compiler/optimizing/instruction_simplifier_arm64.cc
@@ -37,9 +37,7 @@ class InstructionSimplifierArm64Visitor : public HGraphVisitor {
private:
void RecordSimplification() {
- if (stats_ != nullptr) {
- stats_->RecordStat(kInstructionSimplificationsArch);
- }
+ MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplificationsArch);
}
bool TryMergeIntoUsersShifterOperand(HInstruction* instruction);
diff --git a/compiler/optimizing/instruction_simplifier_mips.cc b/compiler/optimizing/instruction_simplifier_mips.cc
index 6a0d8a60c4..fa97401a0c 100644
--- a/compiler/optimizing/instruction_simplifier_mips.cc
+++ b/compiler/optimizing/instruction_simplifier_mips.cc
@@ -33,9 +33,7 @@ class InstructionSimplifierMipsVisitor : public HGraphVisitor {
private:
void RecordSimplification() {
- if (stats_ != nullptr) {
- stats_->RecordStat(kInstructionSimplificationsArch);
- }
+ MaybeRecordStat(stats_, MethodCompilationStat::kInstructionSimplificationsArch);
}
bool TryExtractArrayAccessIndex(HInstruction* access,
diff --git a/compiler/optimizing/instruction_simplifier_mips.h b/compiler/optimizing/instruction_simplifier_mips.h
index 22cc2efc1a..6cb8affe85 100644
--- a/compiler/optimizing/instruction_simplifier_mips.h
+++ b/compiler/optimizing/instruction_simplifier_mips.h
@@ -30,7 +30,7 @@ namespace mips {
class InstructionSimplifierMips : public HOptimization {
public:
InstructionSimplifierMips(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
- : HOptimization(graph, "instruction_simplifier_mips", stats),
+ : HOptimization(graph, kInstructionSimplifierMipsPassName, stats),
codegen_(down_cast<CodeGeneratorMIPS*>(codegen)) {}
static constexpr const char* kInstructionSimplifierMipsPassName = "instruction_simplifier_mips";
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index dfae534555..77199242f5 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -104,7 +104,8 @@ static inline IntrinsicExceptions GetExceptions(Intrinsics i) {
return kCanThrow;
}
-static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke) {
+static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke)
+ REQUIRES_SHARED(Locks::mutator_lock_) {
// Whenever the intrinsic is marked as static, report an error if we find an InvokeVirtual.
//
// Whenever the intrinsic is marked as direct and we find an InvokeVirtual, a devirtualization
@@ -130,7 +131,6 @@ static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke) {
}
if (invoke_type == kVirtual) {
ArtMethod* art_method = invoke->GetResolvedMethod();
- ScopedObjectAccess soa(Thread::Current());
return (art_method->IsFinal() || art_method->GetDeclaringClass()->IsFinal());
}
return false;
@@ -139,9 +139,39 @@ static bool CheckInvokeType(Intrinsics intrinsic, HInvoke* invoke) {
// Call might be devirtualized.
return (invoke_type == kVirtual || invoke_type == kDirect);
- default:
+ case kSuper:
+ case kInterface:
+ case kPolymorphic:
return false;
}
+ LOG(FATAL) << "Unknown intrinsic invoke type: " << intrinsic_type;
+ UNREACHABLE();
+}
+
+bool IntrinsicsRecognizer::Recognize(HInvoke* invoke, /*out*/ bool* wrong_invoke_type) {
+ ArtMethod* art_method = invoke->GetResolvedMethod();
+ *wrong_invoke_type = false;
+ if (art_method == nullptr || !art_method->IsIntrinsic()) {
+ return false;
+ }
+
+ // TODO: b/65872996 The intent is that polymorphic signature methods should
+ // be compiler intrinsics. At present, they are only interpreter intrinsics.
+ if (art_method->IsPolymorphicSignature()) {
+ return false;
+ }
+
+ Intrinsics intrinsic = static_cast<Intrinsics>(art_method->GetIntrinsic());
+ if (!CheckInvokeType(intrinsic, invoke)) {
+ *wrong_invoke_type = true;
+ return false;
+ }
+
+ invoke->SetIntrinsic(intrinsic,
+ NeedsEnvironmentOrCache(intrinsic),
+ GetSideEffects(intrinsic),
+ GetExceptions(intrinsic));
+ return true;
}
void IntrinsicsRecognizer::Run() {
@@ -151,23 +181,14 @@ void IntrinsicsRecognizer::Run() {
inst_it.Advance()) {
HInstruction* inst = inst_it.Current();
if (inst->IsInvoke()) {
- HInvoke* invoke = inst->AsInvoke();
- ArtMethod* art_method = invoke->GetResolvedMethod();
- if (art_method != nullptr && art_method->IsIntrinsic()) {
- Intrinsics intrinsic = static_cast<Intrinsics>(art_method->GetIntrinsic());
- if (!CheckInvokeType(intrinsic, invoke)) {
- LOG(WARNING) << "Found an intrinsic with unexpected invoke type: "
- << static_cast<uint32_t>(intrinsic) << " for "
- << art_method->PrettyMethod()
- << invoke->DebugName();
- } else {
- invoke->SetIntrinsic(intrinsic,
- NeedsEnvironmentOrCache(intrinsic),
- GetSideEffects(intrinsic),
- GetExceptions(intrinsic));
- MaybeRecordStat(stats_,
- MethodCompilationStat::kIntrinsicRecognized);
- }
+ bool wrong_invoke_type = false;
+ if (Recognize(inst->AsInvoke(), &wrong_invoke_type)) {
+ MaybeRecordStat(stats_, MethodCompilationStat::kIntrinsicRecognized);
+ } else if (wrong_invoke_type) {
+ LOG(WARNING)
+ << "Found an intrinsic with unexpected invoke type: "
+ << inst->AsInvoke()->GetResolvedMethod()->PrettyMethod() << " "
+ << inst->DebugName();
}
}
}
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 707ff3408e..c07a99032a 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -37,11 +37,19 @@ static constexpr uint64_t kNanDouble = 0x7ff8000000000000;
// Recognize intrinsics from HInvoke nodes.
class IntrinsicsRecognizer : public HOptimization {
public:
- IntrinsicsRecognizer(HGraph* graph, OptimizingCompilerStats* stats)
- : HOptimization(graph, kIntrinsicsRecognizerPassName, stats) {}
+ IntrinsicsRecognizer(HGraph* graph,
+ OptimizingCompilerStats* stats,
+ const char* name = kIntrinsicsRecognizerPassName)
+ : HOptimization(graph, name, stats) {}
void Run() OVERRIDE;
+ // Static helper that recognizes an intrinsic call. Returns true on success.
+ // If it fails due to an invoke type mismatch, wrong_invoke_type is set.
+ // Useful for recognizing intrinsics on individual calls outside this full pass.
+ static bool Recognize(HInvoke* invoke, /*out*/ bool* wrong_invoke_type)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
static constexpr const char* kIntrinsicsRecognizerPassName = "intrinsics_recognition";
private:
diff --git a/compiler/optimizing/licm.h b/compiler/optimizing/licm.h
index bf56f53d46..ee567aeb20 100644
--- a/compiler/optimizing/licm.h
+++ b/compiler/optimizing/licm.h
@@ -26,8 +26,11 @@ class SideEffectsAnalysis;
class LICM : public HOptimization {
public:
- LICM(HGraph* graph, const SideEffectsAnalysis& side_effects, OptimizingCompilerStats* stats)
- : HOptimization(graph, kLoopInvariantCodeMotionPassName, stats),
+ LICM(HGraph* graph,
+ const SideEffectsAnalysis& side_effects,
+ OptimizingCompilerStats* stats,
+ const char* name = kLoopInvariantCodeMotionPassName)
+ : HOptimization(graph, name, stats),
side_effects_(side_effects) {}
void Run() OVERRIDE;
diff --git a/compiler/optimizing/load_store_analysis.h b/compiler/optimizing/load_store_analysis.h
index aa8b5bbdc9..437e6be418 100644
--- a/compiler/optimizing/load_store_analysis.h
+++ b/compiler/optimizing/load_store_analysis.h
@@ -564,8 +564,8 @@ class HeapLocationCollector : public HGraphVisitor {
class LoadStoreAnalysis : public HOptimization {
public:
- explicit LoadStoreAnalysis(HGraph* graph)
- : HOptimization(graph, kLoadStoreAnalysisPassName),
+ explicit LoadStoreAnalysis(HGraph* graph, const char* name = kLoadStoreAnalysisPassName)
+ : HOptimization(graph, name),
heap_location_collector_(graph) {}
const HeapLocationCollector& GetHeapLocationCollector() const {
diff --git a/compiler/optimizing/load_store_elimination.h b/compiler/optimizing/load_store_elimination.h
index 20a8a769c0..7153541baf 100644
--- a/compiler/optimizing/load_store_elimination.h
+++ b/compiler/optimizing/load_store_elimination.h
@@ -29,8 +29,9 @@ class LoadStoreElimination : public HOptimization {
LoadStoreElimination(HGraph* graph,
const SideEffectsAnalysis& side_effects,
const LoadStoreAnalysis& lsa,
- OptimizingCompilerStats* stats)
- : HOptimization(graph, kLoadStoreEliminationPassName, stats),
+ OptimizingCompilerStats* stats,
+ const char* name = kLoadStoreEliminationPassName)
+ : HOptimization(graph, name, stats),
side_effects_(side_effects),
lsa_(lsa) {}
diff --git a/compiler/optimizing/loop_optimization.cc b/compiler/optimizing/loop_optimization.cc
index fcc59ea3f9..1ca096035e 100644
--- a/compiler/optimizing/loop_optimization.cc
+++ b/compiler/optimizing/loop_optimization.cc
@@ -448,8 +448,9 @@ static bool CheckInductionSetFullyRemoved(ScopedArenaSet<HInstruction*>* iset) {
HLoopOptimization::HLoopOptimization(HGraph* graph,
CompilerDriver* compiler_driver,
HInductionVarAnalysis* induction_analysis,
- OptimizingCompilerStats* stats)
- : HOptimization(graph, kLoopOptimizationPassName, stats),
+ OptimizingCompilerStats* stats,
+ const char* name)
+ : HOptimization(graph, name, stats),
compiler_driver_(compiler_driver),
induction_range_(induction_analysis),
loop_allocator_(nullptr),
diff --git a/compiler/optimizing/loop_optimization.h b/compiler/optimizing/loop_optimization.h
index 51e0a986b8..a707ad1358 100644
--- a/compiler/optimizing/loop_optimization.h
+++ b/compiler/optimizing/loop_optimization.h
@@ -37,7 +37,8 @@ class HLoopOptimization : public HOptimization {
HLoopOptimization(HGraph* graph,
CompilerDriver* compiler_driver,
HInductionVarAnalysis* induction_analysis,
- OptimizingCompilerStats* stats);
+ OptimizingCompilerStats* stats,
+ const char* name = kLoopOptimizationPassName);
void Run() OVERRIDE;
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index fff61f5727..4a9da7ece1 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -845,6 +845,13 @@ static void UpdateInputsUsers(HInstruction* instruction) {
DCHECK(!instruction->HasEnvironment());
}
+void HBasicBlock::ReplaceAndRemovePhiWith(HPhi* initial, HPhi* replacement) {
+ DCHECK(initial->GetBlock() == this);
+ InsertPhiAfter(replacement, initial);
+ initial->ReplaceWith(replacement);
+ RemovePhi(initial);
+}
+
void HBasicBlock::ReplaceAndRemoveInstructionWith(HInstruction* initial,
HInstruction* replacement) {
DCHECK(initial->GetBlock() == this);
@@ -1396,6 +1403,14 @@ HConstant* HTypeConversion::TryStaticEvaluation() const {
if (GetInput()->IsIntConstant()) {
int32_t value = GetInput()->AsIntConstant()->GetValue();
switch (GetResultType()) {
+ case DataType::Type::kInt8:
+ return graph->GetIntConstant(static_cast<int8_t>(value), GetDexPc());
+ case DataType::Type::kUint8:
+ return graph->GetIntConstant(static_cast<uint8_t>(value), GetDexPc());
+ case DataType::Type::kInt16:
+ return graph->GetIntConstant(static_cast<int16_t>(value), GetDexPc());
+ case DataType::Type::kUint16:
+ return graph->GetIntConstant(static_cast<uint16_t>(value), GetDexPc());
case DataType::Type::kInt64:
return graph->GetLongConstant(static_cast<int64_t>(value), GetDexPc());
case DataType::Type::kFloat32:
@@ -1408,6 +1423,14 @@ HConstant* HTypeConversion::TryStaticEvaluation() const {
} else if (GetInput()->IsLongConstant()) {
int64_t value = GetInput()->AsLongConstant()->GetValue();
switch (GetResultType()) {
+ case DataType::Type::kInt8:
+ return graph->GetIntConstant(static_cast<int8_t>(value), GetDexPc());
+ case DataType::Type::kUint8:
+ return graph->GetIntConstant(static_cast<uint8_t>(value), GetDexPc());
+ case DataType::Type::kInt16:
+ return graph->GetIntConstant(static_cast<int16_t>(value), GetDexPc());
+ case DataType::Type::kUint16:
+ return graph->GetIntConstant(static_cast<uint16_t>(value), GetDexPc());
case DataType::Type::kInt32:
return graph->GetIntConstant(static_cast<int32_t>(value), GetDexPc());
case DataType::Type::kFloat32:
@@ -2907,6 +2930,28 @@ void HInstruction::RemoveEnvironmentUsers() {
env_uses_.clear();
}
+HInstruction* ReplaceInstrOrPhiByClone(HInstruction* instr) {
+ HInstruction* clone = instr->Clone(instr->GetBlock()->GetGraph()->GetAllocator());
+ HBasicBlock* block = instr->GetBlock();
+
+ if (instr->IsPhi()) {
+ HPhi* phi = instr->AsPhi();
+ DCHECK(!phi->HasEnvironment());
+ HPhi* phi_clone = clone->AsPhi();
+ block->ReplaceAndRemovePhiWith(phi, phi_clone);
+ } else {
+ block->ReplaceAndRemoveInstructionWith(instr, clone);
+ if (instr->HasEnvironment()) {
+ clone->CopyEnvironmentFrom(instr->GetEnvironment());
+ HLoopInformation* loop_info = block->GetLoopInformation();
+ if (instr->IsSuspendCheck() && loop_info != nullptr) {
+ loop_info->SetSuspendCheck(clone->AsSuspendCheck());
+ }
+ }
+ }
+ return clone;
+}
+
// Returns an instruction with the opposite Boolean value from 'cond'.
HInstruction* HGraph::InsertOppositeCondition(HInstruction* cond, HInstruction* cursor) {
ArenaAllocator* allocator = GetAllocator();
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 6672901781..66d5bfea32 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -1160,6 +1160,8 @@ class HBasicBlock : public ArenaObject<kArenaAllocBasicBlock> {
// Insert `instruction` before/after an existing instruction `cursor`.
void InsertInstructionBefore(HInstruction* instruction, HInstruction* cursor);
void InsertInstructionAfter(HInstruction* instruction, HInstruction* cursor);
+ // Replace phi `initial` with `replacement` within this block.
+ void ReplaceAndRemovePhiWith(HPhi* initial, HPhi* replacement);
// Replace instruction `initial` with `replacement` within this block.
void ReplaceAndRemoveInstructionWith(HInstruction* initial,
HInstruction* replacement);
@@ -1480,18 +1482,31 @@ FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
#undef FORWARD_DECLARATION
#define DECLARE_INSTRUCTION(type) \
+ private: \
+ H##type& operator=(const H##type&) = delete; \
+ public: \
InstructionKind GetKindInternal() const OVERRIDE { return k##type; } \
const char* DebugName() const OVERRIDE { return #type; } \
bool InstructionTypeEquals(const HInstruction* other) const OVERRIDE { \
return other->Is##type(); \
} \
+ HInstruction* Clone(ArenaAllocator* arena) const OVERRIDE { \
+ DCHECK(IsClonable()); \
+ return new (arena) H##type(*this->As##type()); \
+ } \
void Accept(HGraphVisitor* visitor) OVERRIDE
#define DECLARE_ABSTRACT_INSTRUCTION(type) \
+ private: \
+ H##type& operator=(const H##type&) = delete; \
+ public: \
bool Is##type() const { return As##type() != nullptr; } \
const H##type* As##type() const { return this; } \
H##type* As##type() { return this; }
+#define DEFAULT_COPY_CONSTRUCTOR(type) \
+ explicit H##type(const H##type& other) = default;
+
template <typename T>
class HUseListNode : public ArenaObject<kArenaAllocUseListNode>,
public IntrusiveForwardListNode<HUseListNode<T>> {
@@ -2182,6 +2197,25 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
FOR_EACH_ABSTRACT_INSTRUCTION(INSTRUCTION_TYPE_CHECK)
#undef INSTRUCTION_TYPE_CHECK
+ // Return a clone of the instruction if it is clonable (shallow copy by default, custom copy
+ // if a custom copy-constructor is provided for a particular type). If IsClonable() is false for
+ // the instruction then the behaviour of this function is undefined.
+ //
+ // Note: It is semantically valid to create a clone of the instruction only until
+ // the prepare_for_register_allocator phase, as lifetime, intervals, and codegen
+ // info are not copied.
+ //
+ // Note: HEnvironment and some other fields are not copied and are set to default values, see
+ // 'explicit HInstruction(const HInstruction& other)' for details.
+ virtual HInstruction* Clone(ArenaAllocator* arena ATTRIBUTE_UNUSED) const {
+ LOG(FATAL) << "Cloning is not implemented for the instruction " <<
+ DebugName() << " " << GetId();
+ UNREACHABLE();
+ }
+
+ // Return whether the instruction can be cloned (copied).
+ virtual bool IsClonable() const { return false; }
+
// Returns whether the instruction can be moved within the graph.
// TODO: this method is used by LICM and GVN with possibly different
// meanings? split and rename?
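// Usage sketch (illustrative only, mirroring ReplaceInstrOrPhiByClone in
// nodes.cc): a pass may replace a clonable non-phi instruction with its copy:
//
//   if (instr->IsClonable()) {
//     HInstruction* clone = instr->Clone(graph->GetAllocator());
//     instr->GetBlock()->ReplaceAndRemoveInstructionWith(instr, clone);
//     if (instr->HasEnvironment()) {
//       clone->CopyEnvironmentFrom(instr->GetEnvironment());
//     }
//   }
//
// Phis take the analogous ReplaceAndRemovePhiWith path.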
@@ -2298,6 +2332,30 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
packed_fields_ = BitFieldType::Update(value, packed_fields_);
}
+ // Copy construction for the instruction (used for Clone function).
+ //
+ // Fields (e.g. lifetime, intervals and codegen info) associated with phases starting from
+ // prepare_for_register_allocator are not copied (set to default values).
+ //
+ // Copy constructors must be provided for every HInstruction type; default copy constructor is
+ // fine for most of them. However, for some of the instructions a custom copy constructor
+ // must be specified (when an instruction has non-trivially copyable fields and needs
+ // special behaviour for copying them).
+ explicit HInstruction(const HInstruction& other)
+ : previous_(nullptr),
+ next_(nullptr),
+ block_(nullptr),
+ dex_pc_(other.dex_pc_),
+ id_(-1),
+ ssa_index_(-1),
+ packed_fields_(other.packed_fields_),
+ environment_(nullptr),
+ locations_(nullptr),
+ live_interval_(nullptr),
+ lifetime_position_(kNoLifetime),
+ side_effects_(other.side_effects_),
+ reference_type_handle_(other.reference_type_handle_) {}
+
private:
void FixUpUserRecordsAfterUseInsertion(HUseList<HInstruction*>::iterator fixup_end) {
auto before_use_node = uses_.before_begin();
@@ -2387,8 +2445,6 @@ class HInstruction : public ArenaObject<kArenaAllocInstruction> {
friend class HEnvironment;
friend class HGraph;
friend class HInstructionList;
-
- DISALLOW_COPY_AND_ASSIGN(HInstruction);
};
std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs);
@@ -2484,10 +2540,9 @@ class HVariableInputSizeInstruction : public HInstruction {
: HInstruction(side_effects, dex_pc),
inputs_(number_of_inputs, allocator->Adapter(kind)) {}
- ArenaVector<HUserRecord<HInstruction*>> inputs_;
+ DEFAULT_COPY_CONSTRUCTOR(VariableInputSizeInstruction);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVariableInputSizeInstruction);
+ ArenaVector<HUserRecord<HInstruction*>> inputs_;
};
template<size_t N>
@@ -2502,6 +2557,9 @@ class HTemplateInstruction: public HInstruction {
return ArrayRef<HUserRecord<HInstruction*>>(inputs_);
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(TemplateInstruction<N>);
+
private:
std::array<HUserRecord<HInstruction*>, N> inputs_;
@@ -2522,6 +2580,9 @@ class HTemplateInstruction<0>: public HInstruction {
return ArrayRef<HUserRecord<HInstruction*>>();
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(TemplateInstruction<0>);
+
private:
friend class SsaBuilder;
};
@@ -2547,6 +2608,7 @@ class HExpression : public HTemplateInstruction<N> {
static_assert(kNumberOfExpressionPackedBits <= HInstruction::kMaxNumberOfPackedBits,
"Too many packed fields.");
using TypeField = BitField<DataType::Type, kFieldType, kFieldTypeSize>;
+ DEFAULT_COPY_CONSTRUCTOR(Expression<N>);
};
// Represents dex's RETURN_VOID opcode. A HReturnVoid is a control flow
@@ -2560,8 +2622,8 @@ class HReturnVoid FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(ReturnVoid);
- private:
- DISALLOW_COPY_AND_ASSIGN(HReturnVoid);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ReturnVoid);
};
// Represents dex's RETURN opcodes. A HReturn is a control flow
@@ -2577,8 +2639,8 @@ class HReturn FINAL : public HTemplateInstruction<1> {
DECLARE_INSTRUCTION(Return);
- private:
- DISALLOW_COPY_AND_ASSIGN(HReturn);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Return);
};
class HPhi FINAL : public HVariableInputSizeInstruction {
@@ -2604,6 +2666,8 @@ class HPhi FINAL : public HVariableInputSizeInstruction {
SetPackedFlag<kFlagCanBeNull>(true);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
// Returns a type equivalent to the given `type`, but that a `HPhi` can hold.
static DataType::Type ToPhiType(DataType::Type type) {
return DataType::Kind(type);
@@ -2666,6 +2730,9 @@ class HPhi FINAL : public HVariableInputSizeInstruction {
DECLARE_INSTRUCTION(Phi);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Phi);
+
private:
static constexpr size_t kFieldType = HInstruction::kNumberOfGenericPackedBits;
static constexpr size_t kFieldTypeSize =
@@ -2677,8 +2744,6 @@ class HPhi FINAL : public HVariableInputSizeInstruction {
using TypeField = BitField<DataType::Type, kFieldType, kFieldTypeSize>;
const uint32_t reg_number_;
-
- DISALLOW_COPY_AND_ASSIGN(HPhi);
};
// The exit instruction is the only instruction of the exit block.
@@ -2692,8 +2757,8 @@ class HExit FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(Exit);
- private:
- DISALLOW_COPY_AND_ASSIGN(HExit);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Exit);
};
// Jumps from one block to another.
@@ -2701,6 +2766,7 @@ class HGoto FINAL : public HTemplateInstruction<0> {
public:
explicit HGoto(uint32_t dex_pc = kNoDexPc) : HTemplateInstruction(SideEffects::None(), dex_pc) {}
+ bool IsClonable() const OVERRIDE { return true; }
bool IsControlFlow() const OVERRIDE { return true; }
HBasicBlock* GetSuccessor() const {
@@ -2709,8 +2775,8 @@ class HGoto FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(Goto);
- private:
- DISALLOW_COPY_AND_ASSIGN(HGoto);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Goto);
};
class HConstant : public HExpression<0> {
@@ -2733,8 +2799,8 @@ class HConstant : public HExpression<0> {
DECLARE_ABSTRACT_INSTRUCTION(Constant);
- private:
- DISALLOW_COPY_AND_ASSIGN(HConstant);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Constant);
};
class HNullConstant FINAL : public HConstant {
@@ -2752,12 +2818,14 @@ class HNullConstant FINAL : public HConstant {
DECLARE_INSTRUCTION(NullConstant);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(NullConstant);
+
private:
explicit HNullConstant(uint32_t dex_pc = kNoDexPc)
: HConstant(DataType::Type::kReference, dex_pc) {}
friend class HGraph;
- DISALLOW_COPY_AND_ASSIGN(HNullConstant);
};
// Constants of the type int. Those can be from Dex instructions, or
@@ -2789,6 +2857,9 @@ class HIntConstant FINAL : public HConstant {
DECLARE_INSTRUCTION(IntConstant);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(IntConstant);
+
private:
explicit HIntConstant(int32_t value, uint32_t dex_pc = kNoDexPc)
: HConstant(DataType::Type::kInt32, dex_pc), value_(value) {}
@@ -2800,7 +2871,6 @@ class HIntConstant FINAL : public HConstant {
friend class HGraph;
ART_FRIEND_TEST(GraphTest, InsertInstructionBefore);
ART_FRIEND_TYPED_TEST(ParallelMoveTest, ConstantLast);
- DISALLOW_COPY_AND_ASSIGN(HIntConstant);
};
class HLongConstant FINAL : public HConstant {
@@ -2823,6 +2893,9 @@ class HLongConstant FINAL : public HConstant {
DECLARE_INSTRUCTION(LongConstant);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(LongConstant);
+
private:
explicit HLongConstant(int64_t value, uint32_t dex_pc = kNoDexPc)
: HConstant(DataType::Type::kInt64, dex_pc), value_(value) {}
@@ -2830,7 +2903,6 @@ class HLongConstant FINAL : public HConstant {
const int64_t value_;
friend class HGraph;
- DISALLOW_COPY_AND_ASSIGN(HLongConstant);
};
class HFloatConstant FINAL : public HConstant {
@@ -2872,6 +2944,9 @@ class HFloatConstant FINAL : public HConstant {
DECLARE_INSTRUCTION(FloatConstant);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(FloatConstant);
+
private:
explicit HFloatConstant(float value, uint32_t dex_pc = kNoDexPc)
: HConstant(DataType::Type::kFloat32, dex_pc), value_(value) {}
@@ -2883,7 +2958,6 @@ class HFloatConstant FINAL : public HConstant {
// Only the SsaBuilder and HGraph can create floating-point constants.
friend class SsaBuilder;
friend class HGraph;
- DISALLOW_COPY_AND_ASSIGN(HFloatConstant);
};
class HDoubleConstant FINAL : public HConstant {
@@ -2923,6 +2997,9 @@ class HDoubleConstant FINAL : public HConstant {
DECLARE_INSTRUCTION(DoubleConstant);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(DoubleConstant);
+
private:
explicit HDoubleConstant(double value, uint32_t dex_pc = kNoDexPc)
: HConstant(DataType::Type::kFloat64, dex_pc), value_(value) {}
@@ -2934,7 +3011,6 @@ class HDoubleConstant FINAL : public HConstant {
// Only the SsaBuilder and HGraph can create floating-point constants.
friend class SsaBuilder;
friend class HGraph;
- DISALLOW_COPY_AND_ASSIGN(HDoubleConstant);
};
// Conditional branch. A block ending with an HIf instruction must have
@@ -2946,6 +3022,7 @@ class HIf FINAL : public HTemplateInstruction<1> {
SetRawInputAt(0, input);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool IsControlFlow() const OVERRIDE { return true; }
HBasicBlock* IfTrueSuccessor() const {
@@ -2958,8 +3035,8 @@ class HIf FINAL : public HTemplateInstruction<1> {
DECLARE_INSTRUCTION(If);
- private:
- DISALLOW_COPY_AND_ASSIGN(HIf);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(If);
};
@@ -3012,6 +3089,9 @@ class HTryBoundary FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(TryBoundary);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(TryBoundary);
+
private:
static constexpr size_t kFieldBoundaryKind = kNumberOfGenericPackedBits;
static constexpr size_t kFieldBoundaryKindSize =
@@ -3021,8 +3101,6 @@ class HTryBoundary FINAL : public HTemplateInstruction<0> {
static_assert(kNumberOfTryBoundaryPackedBits <= kMaxNumberOfPackedBits,
"Too many packed fields.");
using BoundaryKindField = BitField<BoundaryKind, kFieldBoundaryKind, kFieldBoundaryKindSize>;
-
- DISALLOW_COPY_AND_ASSIGN(HTryBoundary);
};
// Deoptimize to interpreter, upon checking a condition.
@@ -3045,6 +3123,8 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction {
SetRawInputAt(0, cond);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
// Use this constructor when the `HDeoptimize` guards an instruction, and any user
// that relies on the deoptimization to pass should have its input be the `HDeoptimize`
// instead of `guard`.
@@ -3098,6 +3178,9 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction {
DECLARE_INSTRUCTION(Deoptimize);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Deoptimize);
+
private:
static constexpr size_t kFieldCanBeMoved = kNumberOfGenericPackedBits;
static constexpr size_t kFieldDeoptimizeKind = kNumberOfGenericPackedBits + 1;
@@ -3109,8 +3192,6 @@ class HDeoptimize FINAL : public HVariableInputSizeInstruction {
"Too many packed fields.");
using DeoptimizeKindField =
BitField<DeoptimizationKind, kFieldDeoptimizeKind, kFieldDeoptimizeKindSize>;
-
- DISALLOW_COPY_AND_ASSIGN(HDeoptimize);
};
// Represents a should_deoptimize flag. Currently used for CHA-based devirtualization.
@@ -3136,8 +3217,8 @@ class HShouldDeoptimizeFlag FINAL : public HVariableInputSizeInstruction {
DECLARE_INSTRUCTION(ShouldDeoptimizeFlag);
- private:
- DISALLOW_COPY_AND_ASSIGN(HShouldDeoptimizeFlag);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ShouldDeoptimizeFlag);
};
// Represents the ArtMethod that was passed as a first argument to
@@ -3150,8 +3231,8 @@ class HCurrentMethod FINAL : public HExpression<0> {
DECLARE_INSTRUCTION(CurrentMethod);
- private:
- DISALLOW_COPY_AND_ASSIGN(HCurrentMethod);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(CurrentMethod);
};
// Fetches an ArtMethod from the virtual table or the interface method table
@@ -3174,6 +3255,7 @@ class HClassTableGet FINAL : public HExpression<1> {
SetRawInputAt(0, cls);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
return other->AsClassTableGet()->GetIndex() == index_ &&
@@ -3185,6 +3267,9 @@ class HClassTableGet FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(ClassTableGet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ClassTableGet);
+
private:
static constexpr size_t kFieldTableKind = kNumberOfExpressionPackedBits;
static constexpr size_t kFieldTableKindSize =
@@ -3196,8 +3281,6 @@ class HClassTableGet FINAL : public HExpression<1> {
// The index of the ArtMethod in the table.
const size_t index_;
-
- DISALLOW_COPY_AND_ASSIGN(HClassTableGet);
};
// PackedSwitch (jump table). A block ending with a PackedSwitch instruction will
@@ -3215,6 +3298,8 @@ class HPackedSwitch FINAL : public HTemplateInstruction<1> {
SetRawInputAt(0, input);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
bool IsControlFlow() const OVERRIDE { return true; }
int32_t GetStartValue() const { return start_value_; }
@@ -3227,11 +3312,12 @@ class HPackedSwitch FINAL : public HTemplateInstruction<1> {
}
DECLARE_INSTRUCTION(PackedSwitch);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(PackedSwitch);
+
private:
const int32_t start_value_;
const uint32_t num_entries_;
-
- DISALLOW_COPY_AND_ASSIGN(HPackedSwitch);
};
class HUnaryOperation : public HExpression<1> {
@@ -3241,6 +3327,9 @@ class HUnaryOperation : public HExpression<1> {
SetRawInputAt(0, input);
}
+ // All of the UnaryOperation instructions are clonable.
+ bool IsClonable() const OVERRIDE { return true; }
+
HInstruction* GetInput() const { return InputAt(0); }
DataType::Type GetResultType() const { return GetType(); }
@@ -3262,8 +3351,8 @@ class HUnaryOperation : public HExpression<1> {
DECLARE_ABSTRACT_INSTRUCTION(UnaryOperation);
- private:
- DISALLOW_COPY_AND_ASSIGN(HUnaryOperation);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(UnaryOperation);
};
class HBinaryOperation : public HExpression<2> {
@@ -3278,6 +3367,9 @@ class HBinaryOperation : public HExpression<2> {
SetRawInputAt(1, right);
}
+ // All of the BinaryOperation instructions are clonable.
+ bool IsClonable() const OVERRIDE { return true; }
+
HInstruction* GetLeft() const { return InputAt(0); }
HInstruction* GetRight() const { return InputAt(1); }
DataType::Type GetResultType() const { return GetType(); }
@@ -3352,8 +3444,8 @@ class HBinaryOperation : public HExpression<2> {
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation);
- private:
- DISALLOW_COPY_AND_ASSIGN(HBinaryOperation);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(BinaryOperation);
};
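The IsClonable() overrides sprinkled through this file opt individual instruction kinds into cloning; the copy itself presumably goes through a per-class clone hook generated by DECLARE_INSTRUCTION on top of the defaulted copy constructor. A hypothetical sketch of that hook, shown here as it would read for HAdd (the actual macro body is not part of this hunk):

    // Assumed shape of the clone hook DECLARE_INSTRUCTION would generate,
    // written out for HAdd; it relies on the protected default copy
    // constructor enabled by DEFAULT_COPY_CONSTRUCTOR above.
    HInstruction* Clone(ArenaAllocator* arena) const OVERRIDE {
      DCHECK(IsClonable());
      return new (arena) HAdd(*this->AsAdd());
    }

The DCHECK guards against cloning kinds that keep the default IsClonable() == false.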
// The comparison bias applies for floating point operations and indicates how NaN
@@ -3443,8 +3535,7 @@ class HCondition : public HBinaryOperation {
return GetBlock()->GetGraph()->GetIntConstant(value, dex_pc);
}
- private:
- DISALLOW_COPY_AND_ASSIGN(HCondition);
+ DEFAULT_COPY_CONSTRUCTOR(Condition);
};
// Instruction to check if two inputs are equal to each other.
@@ -3486,10 +3577,11 @@ class HEqual FINAL : public HCondition {
return kCondNE;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Equal);
+
private:
template <typename T> static bool Compute(T x, T y) { return x == y; }
-
- DISALLOW_COPY_AND_ASSIGN(HEqual);
};
class HNotEqual FINAL : public HCondition {
@@ -3529,10 +3621,11 @@ class HNotEqual FINAL : public HCondition {
return kCondEQ;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(NotEqual);
+
private:
template <typename T> static bool Compute(T x, T y) { return x != y; }
-
- DISALLOW_COPY_AND_ASSIGN(HNotEqual);
};
class HLessThan FINAL : public HCondition {
@@ -3566,10 +3659,11 @@ class HLessThan FINAL : public HCondition {
return kCondGE;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(LessThan);
+
private:
template <typename T> static bool Compute(T x, T y) { return x < y; }
-
- DISALLOW_COPY_AND_ASSIGN(HLessThan);
};
class HLessThanOrEqual FINAL : public HCondition {
@@ -3603,10 +3697,11 @@ class HLessThanOrEqual FINAL : public HCondition {
return kCondGT;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(LessThanOrEqual);
+
private:
template <typename T> static bool Compute(T x, T y) { return x <= y; }
-
- DISALLOW_COPY_AND_ASSIGN(HLessThanOrEqual);
};
class HGreaterThan FINAL : public HCondition {
@@ -3640,10 +3735,11 @@ class HGreaterThan FINAL : public HCondition {
return kCondLE;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(GreaterThan);
+
private:
template <typename T> static bool Compute(T x, T y) { return x > y; }
-
- DISALLOW_COPY_AND_ASSIGN(HGreaterThan);
};
class HGreaterThanOrEqual FINAL : public HCondition {
@@ -3677,10 +3773,11 @@ class HGreaterThanOrEqual FINAL : public HCondition {
return kCondLT;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(GreaterThanOrEqual);
+
private:
template <typename T> static bool Compute(T x, T y) { return x >= y; }
-
- DISALLOW_COPY_AND_ASSIGN(HGreaterThanOrEqual);
};
class HBelow FINAL : public HCondition {
@@ -3715,12 +3812,13 @@ class HBelow FINAL : public HCondition {
return kCondAE;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Below);
+
private:
template <typename T> static bool Compute(T x, T y) {
return MakeUnsigned(x) < MakeUnsigned(y);
}
-
- DISALLOW_COPY_AND_ASSIGN(HBelow);
};
class HBelowOrEqual FINAL : public HCondition {
@@ -3755,12 +3853,13 @@ class HBelowOrEqual FINAL : public HCondition {
return kCondA;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(BelowOrEqual);
+
private:
template <typename T> static bool Compute(T x, T y) {
return MakeUnsigned(x) <= MakeUnsigned(y);
}
-
- DISALLOW_COPY_AND_ASSIGN(HBelowOrEqual);
};
class HAbove FINAL : public HCondition {
@@ -3795,12 +3894,13 @@ class HAbove FINAL : public HCondition {
return kCondBE;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Above);
+
private:
template <typename T> static bool Compute(T x, T y) {
return MakeUnsigned(x) > MakeUnsigned(y);
}
-
- DISALLOW_COPY_AND_ASSIGN(HAbove);
};
class HAboveOrEqual FINAL : public HCondition {
@@ -3835,12 +3935,13 @@ class HAboveOrEqual FINAL : public HCondition {
return kCondB;
}
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(AboveOrEqual);
+
private:
template <typename T> static bool Compute(T x, T y) {
return MakeUnsigned(x) >= MakeUnsigned(y);
}
-
- DISALLOW_COPY_AND_ASSIGN(HAboveOrEqual);
};
// Instruction to check how two inputs compare to each other.
@@ -3930,8 +4031,7 @@ class HCompare FINAL : public HBinaryOperation {
return GetBlock()->GetGraph()->GetIntConstant(value, dex_pc);
}
- private:
- DISALLOW_COPY_AND_ASSIGN(HCompare);
+ DEFAULT_COPY_CONSTRUCTOR(Compare);
};
class HNewInstance FINAL : public HExpression<1> {
@@ -3950,6 +4050,8 @@ class HNewInstance FINAL : public HExpression<1> {
SetRawInputAt(0, cls);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
dex::TypeIndex GetTypeIndex() const { return type_index_; }
const DexFile& GetDexFile() const { return dex_file_; }
@@ -3986,6 +4088,9 @@ class HNewInstance FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(NewInstance);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(NewInstance);
+
private:
static constexpr size_t kFlagFinalizable = kNumberOfExpressionPackedBits;
static constexpr size_t kNumberOfNewInstancePackedBits = kFlagFinalizable + 1;
@@ -3995,8 +4100,6 @@ class HNewInstance FINAL : public HExpression<1> {
const dex::TypeIndex type_index_;
const DexFile& dex_file_;
QuickEntrypointEnum entrypoint_;
-
- DISALLOW_COPY_AND_ASSIGN(HNewInstance);
};
enum IntrinsicNeedsEnvironmentOrCache {
@@ -4114,6 +4217,8 @@ class HInvoke : public HVariableInputSizeInstruction {
SetPackedFlag<kFlagCanThrow>(true);
}
+ DEFAULT_COPY_CONSTRUCTOR(Invoke);
+
uint32_t number_of_arguments_;
ArtMethod* resolved_method_;
const uint32_t dex_method_index_;
@@ -4121,9 +4226,6 @@ class HInvoke : public HVariableInputSizeInstruction {
// A magic word holding optimizations for intrinsics. See intrinsics.h.
uint32_t intrinsic_optimizations_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(HInvoke);
};
class HInvokeUnresolved FINAL : public HInvoke {
@@ -4144,10 +4246,12 @@ class HInvokeUnresolved FINAL : public HInvoke {
invoke_type) {
}
+ bool IsClonable() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(InvokeUnresolved);
- private:
- DISALLOW_COPY_AND_ASSIGN(HInvokeUnresolved);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InvokeUnresolved);
};
class HInvokePolymorphic FINAL : public HInvoke {
@@ -4166,10 +4270,12 @@ class HInvokePolymorphic FINAL : public HInvoke {
nullptr,
kVirtual) {}
+ bool IsClonable() const OVERRIDE { return true; }
+
DECLARE_INSTRUCTION(InvokePolymorphic);
- private:
- DISALLOW_COPY_AND_ASSIGN(HInvokePolymorphic);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InvokePolymorphic);
};
class HInvokeStaticOrDirect FINAL : public HInvoke {
@@ -4256,6 +4362,8 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
SetPackedField<ClinitCheckRequirementField>(clinit_check_requirement);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
void SetDispatchInfo(const DispatchInfo& dispatch_info) {
bool had_current_method_input = HasCurrentMethodInput();
bool needs_current_method_input = NeedsCurrentMethodInput(dispatch_info.method_load_kind);
@@ -4401,6 +4509,9 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
DECLARE_INSTRUCTION(InvokeStaticOrDirect);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InvokeStaticOrDirect);
+
private:
static constexpr size_t kFieldClinitCheckRequirement = kNumberOfInvokePackedBits;
static constexpr size_t kFieldClinitCheckRequirementSize =
@@ -4416,8 +4527,6 @@ class HInvokeStaticOrDirect FINAL : public HInvoke {
// Cached values of the resolved method, to avoid needing the mutator lock.
MethodReference target_method_;
DispatchInfo dispatch_info_;
-
- DISALLOW_COPY_AND_ASSIGN(HInvokeStaticOrDirect);
};
std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::MethodLoadKind rhs);
std::ostream& operator<<(std::ostream& os, HInvokeStaticOrDirect::ClinitCheckRequirement rhs);
@@ -4441,6 +4550,8 @@ class HInvokeVirtual FINAL : public HInvoke {
kVirtual),
vtable_index_(vtable_index) {}
+ bool IsClonable() const OVERRIDE { return true; }
+
bool CanBeNull() const OVERRIDE {
switch (GetIntrinsic()) {
case Intrinsics::kThreadCurrentThread:
@@ -4463,11 +4574,12 @@ class HInvokeVirtual FINAL : public HInvoke {
DECLARE_INSTRUCTION(InvokeVirtual);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InvokeVirtual);
+
private:
// Cached value of the resolved method, to avoid needing the mutator lock.
const uint32_t vtable_index_;
-
- DISALLOW_COPY_AND_ASSIGN(HInvokeVirtual);
};
class HInvokeInterface FINAL : public HInvoke {
@@ -4489,6 +4601,8 @@ class HInvokeInterface FINAL : public HInvoke {
kInterface),
imt_index_(imt_index) {}
+ bool IsClonable() const OVERRIDE { return true; }
+
bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
// TODO: Add implicit null checks in intrinsics.
return (obj == InputAt(0)) && !GetLocations()->Intrinsified();
@@ -4504,11 +4618,12 @@ class HInvokeInterface FINAL : public HInvoke {
DECLARE_INSTRUCTION(InvokeInterface);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InvokeInterface);
+
private:
// Cached value of the resolved method, to avoid needing the mutator lock.
const uint32_t imt_index_;
-
- DISALLOW_COPY_AND_ASSIGN(HInvokeInterface);
};
class HNeg FINAL : public HUnaryOperation {
@@ -4535,8 +4650,8 @@ class HNeg FINAL : public HUnaryOperation {
DECLARE_INSTRUCTION(Neg);
- private:
- DISALLOW_COPY_AND_ASSIGN(HNeg);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Neg);
};
class HNewArray FINAL : public HExpression<2> {
@@ -4547,6 +4662,8 @@ class HNewArray FINAL : public HExpression<2> {
SetRawInputAt(1, length);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
// Calls runtime so needs an environment.
bool NeedsEnvironment() const OVERRIDE { return true; }
@@ -4566,8 +4683,8 @@ class HNewArray FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(NewArray);
- private:
- DISALLOW_COPY_AND_ASSIGN(HNewArray);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(NewArray);
};
class HAdd FINAL : public HBinaryOperation {
@@ -4601,8 +4718,8 @@ class HAdd FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Add);
- private:
- DISALLOW_COPY_AND_ASSIGN(HAdd);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Add);
};
class HSub FINAL : public HBinaryOperation {
@@ -4634,8 +4751,8 @@ class HSub FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Sub);
- private:
- DISALLOW_COPY_AND_ASSIGN(HSub);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Sub);
};
class HMul FINAL : public HBinaryOperation {
@@ -4669,8 +4786,8 @@ class HMul FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Mul);
- private:
- DISALLOW_COPY_AND_ASSIGN(HMul);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Mul);
};
class HDiv FINAL : public HBinaryOperation {
@@ -4716,8 +4833,8 @@ class HDiv FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Div);
- private:
- DISALLOW_COPY_AND_ASSIGN(HDiv);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Div);
};
class HRem FINAL : public HBinaryOperation {
@@ -4763,8 +4880,8 @@ class HRem FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Rem);
- private:
- DISALLOW_COPY_AND_ASSIGN(HRem);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Rem);
};
class HDivZeroCheck FINAL : public HExpression<1> {
@@ -4789,8 +4906,8 @@ class HDivZeroCheck FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(DivZeroCheck);
- private:
- DISALLOW_COPY_AND_ASSIGN(HDivZeroCheck);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(DivZeroCheck);
};
class HShl FINAL : public HBinaryOperation {
@@ -4835,8 +4952,8 @@ class HShl FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Shl);
- private:
- DISALLOW_COPY_AND_ASSIGN(HShl);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Shl);
};
class HShr FINAL : public HBinaryOperation {
@@ -4881,8 +4998,8 @@ class HShr FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Shr);
- private:
- DISALLOW_COPY_AND_ASSIGN(HShr);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Shr);
};
class HUShr FINAL : public HBinaryOperation {
@@ -4929,8 +5046,8 @@ class HUShr FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(UShr);
- private:
- DISALLOW_COPY_AND_ASSIGN(HUShr);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(UShr);
};
class HAnd FINAL : public HBinaryOperation {
@@ -4966,8 +5083,8 @@ class HAnd FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(And);
- private:
- DISALLOW_COPY_AND_ASSIGN(HAnd);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(And);
};
class HOr FINAL : public HBinaryOperation {
@@ -5003,8 +5120,8 @@ class HOr FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Or);
- private:
- DISALLOW_COPY_AND_ASSIGN(HOr);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Or);
};
class HXor FINAL : public HBinaryOperation {
@@ -5040,8 +5157,8 @@ class HXor FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Xor);
- private:
- DISALLOW_COPY_AND_ASSIGN(HXor);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Xor);
};
class HRor FINAL : public HBinaryOperation {
@@ -5091,8 +5208,8 @@ class HRor FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(Ror);
- private:
- DISALLOW_COPY_AND_ASSIGN(HRor);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Ror);
};
// The value of a parameter in this method. Its location depends on
@@ -5122,6 +5239,9 @@ class HParameterValue FINAL : public HExpression<0> {
DECLARE_INSTRUCTION(ParameterValue);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ParameterValue);
+
private:
// Whether or not the parameter value corresponds to 'this' argument.
static constexpr size_t kFlagIsThis = kNumberOfExpressionPackedBits;
@@ -5135,8 +5255,6 @@ class HParameterValue FINAL : public HExpression<0> {
// The index of this parameter in the parameters list. Must be less
// than HGraph::number_of_in_vregs_.
const uint8_t index_;
-
- DISALLOW_COPY_AND_ASSIGN(HParameterValue);
};
class HNot FINAL : public HUnaryOperation {
@@ -5168,8 +5286,8 @@ class HNot FINAL : public HUnaryOperation {
DECLARE_INSTRUCTION(Not);
- private:
- DISALLOW_COPY_AND_ASSIGN(HNot);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Not);
};
class HBooleanNot FINAL : public HUnaryOperation {
@@ -5205,8 +5323,8 @@ class HBooleanNot FINAL : public HUnaryOperation {
DECLARE_INSTRUCTION(BooleanNot);
- private:
- DISALLOW_COPY_AND_ASSIGN(HBooleanNot);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(BooleanNot);
};
class HTypeConversion FINAL : public HExpression<1> {
@@ -5234,8 +5352,8 @@ class HTypeConversion FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(TypeConversion);
- private:
- DISALLOW_COPY_AND_ASSIGN(HTypeConversion);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(TypeConversion);
};
static constexpr uint32_t kNoRegNumber = -1;
@@ -5249,6 +5367,7 @@ class HNullCheck FINAL : public HExpression<1> {
SetRawInputAt(0, value);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -5260,11 +5379,10 @@ class HNullCheck FINAL : public HExpression<1> {
bool CanBeNull() const OVERRIDE { return false; }
-
DECLARE_INSTRUCTION(NullCheck);
- private:
- DISALLOW_COPY_AND_ASSIGN(HNullCheck);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(NullCheck);
};
// Embeds an ArtField and all the information required by the compiler. We cache
@@ -5326,6 +5444,7 @@ class HInstanceFieldGet FINAL : public HExpression<1> {
SetRawInputAt(0, value);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return !IsVolatile(); }
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
@@ -5355,10 +5474,11 @@ class HInstanceFieldGet FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(InstanceFieldGet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InstanceFieldGet);
+
private:
const FieldInfo field_info_;
-
- DISALLOW_COPY_AND_ASSIGN(HInstanceFieldGet);
};
class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
@@ -5386,6 +5506,8 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
SetRawInputAt(1, value);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
bool CanDoImplicitNullCheckOn(HInstruction* obj) const OVERRIDE {
return (obj == InputAt(0)) && art::CanDoImplicitNullCheckOn(GetFieldOffset().Uint32Value());
}
@@ -5400,6 +5522,9 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
DECLARE_INSTRUCTION(InstanceFieldSet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InstanceFieldSet);
+
private:
static constexpr size_t kFlagValueCanBeNull = kNumberOfGenericPackedBits;
static constexpr size_t kNumberOfInstanceFieldSetPackedBits = kFlagValueCanBeNull + 1;
@@ -5407,8 +5532,6 @@ class HInstanceFieldSet FINAL : public HTemplateInstruction<2> {
"Too many packed fields.");
const FieldInfo field_info_;
-
- DISALLOW_COPY_AND_ASSIGN(HInstanceFieldSet);
};
class HArrayGet FINAL : public HExpression<2> {
@@ -5436,6 +5559,7 @@ class HArrayGet FINAL : public HExpression<2> {
SetRawInputAt(1, index);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -5485,6 +5609,9 @@ class HArrayGet FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(ArrayGet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ArrayGet);
+
private:
// We treat a String as an array, creating the HArrayGet from String.charAt()
// intrinsic in the instruction simplifier. We can always determine whether
@@ -5495,8 +5622,6 @@ class HArrayGet FINAL : public HExpression<2> {
static constexpr size_t kNumberOfArrayGetPackedBits = kFlagIsStringCharAt + 1;
static_assert(kNumberOfArrayGetPackedBits <= HInstruction::kMaxNumberOfPackedBits,
"Too many packed fields.");
-
- DISALLOW_COPY_AND_ASSIGN(HArrayGet);
};
class HArraySet FINAL : public HTemplateInstruction<3> {
@@ -5530,6 +5655,8 @@ class HArraySet FINAL : public HTemplateInstruction<3> {
SetRawInputAt(2, value);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
bool NeedsEnvironment() const OVERRIDE {
// We call a runtime method to throw ArrayStoreException.
return NeedsTypeCheck();
@@ -5595,6 +5722,9 @@ class HArraySet FINAL : public HTemplateInstruction<3> {
DECLARE_INSTRUCTION(ArraySet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ArraySet);
+
private:
static constexpr size_t kFieldExpectedComponentType = kNumberOfGenericPackedBits;
static constexpr size_t kFieldExpectedComponentTypeSize =
@@ -5610,8 +5740,6 @@ class HArraySet FINAL : public HTemplateInstruction<3> {
static_assert(kNumberOfArraySetPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
using ExpectedComponentTypeField =
BitField<DataType::Type, kFieldExpectedComponentType, kFieldExpectedComponentTypeSize>;
-
- DISALLOW_COPY_AND_ASSIGN(HArraySet);
};
class HArrayLength FINAL : public HExpression<1> {
@@ -5624,6 +5752,7 @@ class HArrayLength FINAL : public HExpression<1> {
SetRawInputAt(0, array);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -5636,6 +5765,9 @@ class HArrayLength FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(ArrayLength);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ArrayLength);
+
private:
// We treat a String as an array, creating the HArrayLength from String.length()
// or String.isEmpty() intrinsic in the instruction simplifier. We can always
@@ -5646,8 +5778,6 @@ class HArrayLength FINAL : public HExpression<1> {
static constexpr size_t kNumberOfArrayLengthPackedBits = kFlagIsStringLength + 1;
static_assert(kNumberOfArrayLengthPackedBits <= HInstruction::kMaxNumberOfPackedBits,
"Too many packed fields.");
-
- DISALLOW_COPY_AND_ASSIGN(HArrayLength);
};
class HBoundsCheck FINAL : public HExpression<2> {
@@ -5665,6 +5795,7 @@ class HBoundsCheck FINAL : public HExpression<2> {
SetRawInputAt(1, length);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -5680,10 +5811,11 @@ class HBoundsCheck FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(BoundsCheck);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(BoundsCheck);
+
private:
static constexpr size_t kFlagIsStringCharAt = kNumberOfExpressionPackedBits;
-
- DISALLOW_COPY_AND_ASSIGN(HBoundsCheck);
};
class HSuspendCheck FINAL : public HTemplateInstruction<0> {
@@ -5691,6 +5823,8 @@ class HSuspendCheck FINAL : public HTemplateInstruction<0> {
explicit HSuspendCheck(uint32_t dex_pc = kNoDexPc)
: HTemplateInstruction(SideEffects::CanTriggerGC(), dex_pc), slow_path_(nullptr) {}
+ bool IsClonable() const OVERRIDE { return true; }
+
bool NeedsEnvironment() const OVERRIDE {
return true;
}
@@ -5700,12 +5834,13 @@ class HSuspendCheck FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(SuspendCheck);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(SuspendCheck);
+
private:
// Only used for code generation, in order to share the same slow path between back edges
// of a same loop.
SlowPathCode* slow_path_;
-
- DISALLOW_COPY_AND_ASSIGN(HSuspendCheck);
};
// Pseudo-instruction which provides the native debugger with mapping information.
@@ -5721,8 +5856,8 @@ class HNativeDebugInfo : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(NativeDebugInfo);
- private:
- DISALLOW_COPY_AND_ASSIGN(HNativeDebugInfo);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(NativeDebugInfo);
};
/**
@@ -5788,6 +5923,8 @@ class HLoadClass FINAL : public HInstruction {
SetPackedFlag<kFlagGenerateClInitCheck>(false);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
void SetLoadKind(LoadKind load_kind);
LoadKind GetLoadKind() const {
@@ -5879,6 +6016,9 @@ class HLoadClass FINAL : public HInstruction {
DECLARE_INSTRUCTION(LoadClass);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(LoadClass);
+
private:
static constexpr size_t kFlagNeedsAccessCheck = kNumberOfGenericPackedBits;
static constexpr size_t kFlagIsInBootImage = kFlagNeedsAccessCheck + 1;
@@ -5918,8 +6058,6 @@ class HLoadClass FINAL : public HInstruction {
Handle<mirror::Class> klass_;
ReferenceTypeInfo loaded_class_rti_;
-
- DISALLOW_COPY_AND_ASSIGN(HLoadClass);
};
std::ostream& operator<<(std::ostream& os, HLoadClass::LoadKind rhs);
@@ -5977,6 +6115,8 @@ class HLoadString FINAL : public HInstruction {
SetPackedField<LoadKindField>(LoadKind::kRuntimeCall);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
void SetLoadKind(LoadKind load_kind);
LoadKind GetLoadKind() const {
@@ -6043,6 +6183,9 @@ class HLoadString FINAL : public HInstruction {
DECLARE_INSTRUCTION(LoadString);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(LoadString);
+
private:
static constexpr size_t kFieldLoadKind = kNumberOfGenericPackedBits;
static constexpr size_t kFieldLoadKindSize =
@@ -6062,8 +6205,6 @@ class HLoadString FINAL : public HInstruction {
const DexFile& dex_file_;
Handle<mirror::String> string_;
-
- DISALLOW_COPY_AND_ASSIGN(HLoadString);
};
std::ostream& operator<<(std::ostream& os, HLoadString::LoadKind rhs);
@@ -6095,6 +6236,7 @@ class HClinitCheck FINAL : public HExpression<1> {
SetRawInputAt(0, constant);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -6114,8 +6256,9 @@ class HClinitCheck FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(ClinitCheck);
- private:
- DISALLOW_COPY_AND_ASSIGN(HClinitCheck);
+
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ClinitCheck);
};
class HStaticFieldGet FINAL : public HExpression<1> {
@@ -6141,6 +6284,7 @@ class HStaticFieldGet FINAL : public HExpression<1> {
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return !IsVolatile(); }
bool InstructionDataEquals(const HInstruction* other) const OVERRIDE {
@@ -6166,10 +6310,11 @@ class HStaticFieldGet FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(StaticFieldGet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(StaticFieldGet);
+
private:
const FieldInfo field_info_;
-
- DISALLOW_COPY_AND_ASSIGN(HStaticFieldGet);
};
class HStaticFieldSet FINAL : public HTemplateInstruction<2> {
@@ -6197,6 +6342,7 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> {
SetRawInputAt(1, value);
}
+ bool IsClonable() const OVERRIDE { return true; }
const FieldInfo& GetFieldInfo() const { return field_info_; }
MemberOffset GetFieldOffset() const { return field_info_.GetFieldOffset(); }
DataType::Type GetFieldType() const { return field_info_.GetFieldType(); }
@@ -6208,6 +6354,9 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> {
DECLARE_INSTRUCTION(StaticFieldSet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(StaticFieldSet);
+
private:
static constexpr size_t kFlagValueCanBeNull = kNumberOfGenericPackedBits;
static constexpr size_t kNumberOfStaticFieldSetPackedBits = kFlagValueCanBeNull + 1;
@@ -6215,8 +6364,6 @@ class HStaticFieldSet FINAL : public HTemplateInstruction<2> {
"Too many packed fields.");
const FieldInfo field_info_;
-
- DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
};
class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> {
@@ -6230,6 +6377,7 @@ class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> {
SetRawInputAt(0, obj);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool NeedsEnvironment() const OVERRIDE { return true; }
bool CanThrow() const OVERRIDE { return true; }
@@ -6238,10 +6386,11 @@ class HUnresolvedInstanceFieldGet FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(UnresolvedInstanceFieldGet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(UnresolvedInstanceFieldGet);
+
private:
const uint32_t field_index_;
-
- DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldGet);
};
class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> {
@@ -6259,6 +6408,7 @@ class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> {
SetRawInputAt(1, value);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool NeedsEnvironment() const OVERRIDE { return true; }
bool CanThrow() const OVERRIDE { return true; }
@@ -6267,6 +6417,9 @@ class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> {
DECLARE_INSTRUCTION(UnresolvedInstanceFieldSet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(UnresolvedInstanceFieldSet);
+
private:
static constexpr size_t kFieldFieldType = HInstruction::kNumberOfGenericPackedBits;
static constexpr size_t kFieldFieldTypeSize =
@@ -6278,8 +6431,6 @@ class HUnresolvedInstanceFieldSet FINAL : public HTemplateInstruction<2> {
using FieldTypeField = BitField<DataType::Type, kFieldFieldType, kFieldFieldTypeSize>;
const uint32_t field_index_;
-
- DISALLOW_COPY_AND_ASSIGN(HUnresolvedInstanceFieldSet);
};
class HUnresolvedStaticFieldGet FINAL : public HExpression<0> {
@@ -6291,6 +6442,7 @@ class HUnresolvedStaticFieldGet FINAL : public HExpression<0> {
field_index_(field_index) {
}
+ bool IsClonable() const OVERRIDE { return true; }
bool NeedsEnvironment() const OVERRIDE { return true; }
bool CanThrow() const OVERRIDE { return true; }
@@ -6299,10 +6451,11 @@ class HUnresolvedStaticFieldGet FINAL : public HExpression<0> {
DECLARE_INSTRUCTION(UnresolvedStaticFieldGet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(UnresolvedStaticFieldGet);
+
private:
const uint32_t field_index_;
-
- DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldGet);
};
class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> {
@@ -6318,6 +6471,7 @@ class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> {
SetRawInputAt(0, value);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool NeedsEnvironment() const OVERRIDE { return true; }
bool CanThrow() const OVERRIDE { return true; }
@@ -6326,6 +6480,9 @@ class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> {
DECLARE_INSTRUCTION(UnresolvedStaticFieldSet);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(UnresolvedStaticFieldSet);
+
private:
static constexpr size_t kFieldFieldType = HInstruction::kNumberOfGenericPackedBits;
static constexpr size_t kFieldFieldTypeSize =
@@ -6337,8 +6494,6 @@ class HUnresolvedStaticFieldSet FINAL : public HTemplateInstruction<1> {
using FieldTypeField = BitField<DataType::Type, kFieldFieldType, kFieldFieldTypeSize>;
const uint32_t field_index_;
-
- DISALLOW_COPY_AND_ASSIGN(HUnresolvedStaticFieldSet);
};
// Implement the move-exception DEX instruction.
@@ -6351,8 +6506,8 @@ class HLoadException FINAL : public HExpression<0> {
DECLARE_INSTRUCTION(LoadException);
- private:
- DISALLOW_COPY_AND_ASSIGN(HLoadException);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(LoadException);
};
// Implicit part of move-exception which clears thread-local exception storage.
@@ -6364,8 +6519,8 @@ class HClearException FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(ClearException);
- private:
- DISALLOW_COPY_AND_ASSIGN(HClearException);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ClearException);
};
class HThrow FINAL : public HTemplateInstruction<1> {
@@ -6381,11 +6536,10 @@ class HThrow FINAL : public HTemplateInstruction<1> {
bool CanThrow() const OVERRIDE { return true; }
-
DECLARE_INSTRUCTION(Throw);
- private:
- DISALLOW_COPY_AND_ASSIGN(HThrow);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Throw);
};
/**
@@ -6420,6 +6574,7 @@ class HInstanceOf FINAL : public HExpression<2> {
SetRawInputAt(1, constant);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
@@ -6447,6 +6602,9 @@ class HInstanceOf FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(InstanceOf);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(InstanceOf);
+
private:
static constexpr size_t kFieldTypeCheckKind = kNumberOfExpressionPackedBits;
static constexpr size_t kFieldTypeCheckKindSize =
@@ -6455,8 +6613,6 @@ class HInstanceOf FINAL : public HExpression<2> {
static constexpr size_t kNumberOfInstanceOfPackedBits = kFlagMustDoNullCheck + 1;
static_assert(kNumberOfInstanceOfPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
using TypeCheckKindField = BitField<TypeCheckKind, kFieldTypeCheckKind, kFieldTypeCheckKindSize>;
-
- DISALLOW_COPY_AND_ASSIGN(HInstanceOf);
};
class HBoundType FINAL : public HExpression<1> {
@@ -6470,6 +6626,8 @@ class HBoundType FINAL : public HExpression<1> {
SetRawInputAt(0, input);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
// {Get,Set}Upper* should only be used in reference type propagation.
const ReferenceTypeInfo& GetUpperBound() const { return upper_bound_; }
bool GetUpperCanBeNull() const { return GetPackedFlag<kFlagUpperCanBeNull>(); }
@@ -6484,6 +6642,9 @@ class HBoundType FINAL : public HExpression<1> {
DECLARE_INSTRUCTION(BoundType);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(BoundType);
+
private:
// Represents the top constraint that can_be_null_ cannot exceed (i.e. if this
// is false then CanBeNull() cannot be true).
@@ -6499,8 +6660,6 @@ class HBoundType FINAL : public HExpression<1> {
// // upper_bound_ will be ClassX
// }
ReferenceTypeInfo upper_bound_;
-
- DISALLOW_COPY_AND_ASSIGN(HBoundType);
};
class HCheckCast FINAL : public HTemplateInstruction<2> {
@@ -6516,6 +6675,7 @@ class HCheckCast FINAL : public HTemplateInstruction<2> {
SetRawInputAt(1, constant);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
@@ -6536,6 +6696,9 @@ class HCheckCast FINAL : public HTemplateInstruction<2> {
DECLARE_INSTRUCTION(CheckCast);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(CheckCast);
+
private:
static constexpr size_t kFieldTypeCheckKind = kNumberOfGenericPackedBits;
static constexpr size_t kFieldTypeCheckKindSize =
@@ -6544,8 +6707,6 @@ class HCheckCast FINAL : public HTemplateInstruction<2> {
static constexpr size_t kNumberOfCheckCastPackedBits = kFlagMustDoNullCheck + 1;
static_assert(kNumberOfCheckCastPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
using TypeCheckKindField = BitField<TypeCheckKind, kFieldTypeCheckKind, kFieldTypeCheckKindSize>;
-
- DISALLOW_COPY_AND_ASSIGN(HCheckCast);
};
/**
@@ -6582,10 +6743,15 @@ class HMemoryBarrier FINAL : public HTemplateInstruction<0> {
SetPackedField<BarrierKindField>(barrier_kind);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
MemBarrierKind GetBarrierKind() { return GetPackedField<BarrierKindField>(); }
DECLARE_INSTRUCTION(MemoryBarrier);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(MemoryBarrier);
+
private:
static constexpr size_t kFieldBarrierKind = HInstruction::kNumberOfGenericPackedBits;
static constexpr size_t kFieldBarrierKindSize =
@@ -6595,8 +6761,6 @@ class HMemoryBarrier FINAL : public HTemplateInstruction<0> {
static_assert(kNumberOfMemoryBarrierPackedBits <= kMaxNumberOfPackedBits,
"Too many packed fields.");
using BarrierKindField = BitField<MemBarrierKind, kFieldBarrierKind, kFieldBarrierKindSize>;
-
- DISALLOW_COPY_AND_ASSIGN(HMemoryBarrier);
};
// A constructor fence orders all prior stores to fields that could be accessed via a final field of
@@ -6747,8 +6911,8 @@ class HConstructorFence FINAL : public HVariableInputSizeInstruction {
DECLARE_INSTRUCTION(ConstructorFence);
- private:
- DISALLOW_COPY_AND_ASSIGN(HConstructorFence);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ConstructorFence);
};
class HMonitorOperation FINAL : public HTemplateInstruction<1> {
@@ -6782,6 +6946,9 @@ class HMonitorOperation FINAL : public HTemplateInstruction<1> {
DECLARE_INSTRUCTION(MonitorOperation);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(MonitorOperation);
+
private:
static constexpr size_t kFieldOperationKind = HInstruction::kNumberOfGenericPackedBits;
static constexpr size_t kFieldOperationKindSize =
@@ -6791,9 +6958,6 @@ class HMonitorOperation FINAL : public HTemplateInstruction<1> {
static_assert(kNumberOfMonitorOperationPackedBits <= HInstruction::kMaxNumberOfPackedBits,
"Too many packed fields.");
using OperationKindField = BitField<OperationKind, kFieldOperationKind, kFieldOperationKindSize>;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(HMonitorOperation);
};
class HSelect FINAL : public HExpression<3> {
@@ -6814,6 +6978,7 @@ class HSelect FINAL : public HExpression<3> {
SetRawInputAt(2, condition);
}
+ bool IsClonable() const OVERRIDE { return true; }
HInstruction* GetFalseValue() const { return InputAt(0); }
HInstruction* GetTrueValue() const { return InputAt(1); }
HInstruction* GetCondition() const { return InputAt(2); }
@@ -6829,8 +6994,8 @@ class HSelect FINAL : public HExpression<3> {
DECLARE_INSTRUCTION(Select);
- private:
- DISALLOW_COPY_AND_ASSIGN(HSelect);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(Select);
};
class MoveOperands : public ArenaObject<kArenaAllocMoveOperands> {
@@ -6961,10 +7126,11 @@ class HParallelMove FINAL : public HTemplateInstruction<0> {
DECLARE_INSTRUCTION(ParallelMove);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(ParallelMove);
+
private:
ArenaVector<MoveOperands> moves_;
-
- DISALLOW_COPY_AND_ASSIGN(HParallelMove);
};
// This instruction computes an intermediate address pointing in the 'middle' of an object. The
@@ -6983,6 +7149,7 @@ class HIntermediateAddress FINAL : public HExpression<2> {
SetRawInputAt(1, offset);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -6994,8 +7161,8 @@ class HIntermediateAddress FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(IntermediateAddress);
- private:
- DISALLOW_COPY_AND_ASSIGN(HIntermediateAddress);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(IntermediateAddress);
};
@@ -7070,6 +7237,33 @@ class HGraphDelegateVisitor : public HGraphVisitor {
DISALLOW_COPY_AND_ASSIGN(HGraphDelegateVisitor);
};
+// Create a clone of the given instruction, insert the clone into the graph,
+// replace all uses of the old instruction with the clone, and remove the old
+// instruction from the graph.
+HInstruction* ReplaceInstrOrPhiByClone(HInstruction* instr);
+
+// Create a clone for each clonable instruction/phi and replace the original
+// with its clone.
+//
+// Used for testing the individual instruction cloner.
+class CloneAndReplaceInstructionVisitor : public HGraphDelegateVisitor {
+ public:
+ explicit CloneAndReplaceInstructionVisitor(HGraph* graph)
+ : HGraphDelegateVisitor(graph), instr_replaced_by_clones_count_(0) {}
+
+ void VisitInstruction(HInstruction* instruction) OVERRIDE {
+ if (instruction->IsClonable()) {
+ ReplaceInstrOrPhiByClone(instruction);
+ instr_replaced_by_clones_count_++;
+ }
+ }
+
+ size_t GetInstrReplacedByClonesCount() const { return instr_replaced_by_clones_count_; }
+
+ private:
+ size_t instr_replaced_by_clones_count_;
+
+ DISALLOW_COPY_AND_ASSIGN(CloneAndReplaceInstructionVisitor);
+};
+
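A minimal sketch of how a test might drive this visitor, assuming the usual HGraphVisitor traversal and GraphChecker APIs (the real coverage lives in the new cloner_test.cc):

    // Sketch only; see cloner_test.cc for the actual tests.
    void CloneAllAndVerify(HGraph* graph) {
      CloneAndReplaceInstructionVisitor visitor(graph);
      // Replaces every clonable instruction/phi with its clone, in place.
      visitor.VisitReversePostOrder();
      GraphChecker checker(graph);
      checker.Run();
      CHECK(checker.IsValid());
      CHECK_GT(visitor.GetInstrReplacedByClonesCount(), 0u);
    }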
// Iterator over the blocks that are part of the loop. Includes blocks part
// of an inner loop. The blocks are iterated in the order of their
// block ids.
diff --git a/compiler/optimizing/nodes_mips.h b/compiler/optimizing/nodes_mips.h
index ef388c30d5..2c0595e3d8 100644
--- a/compiler/optimizing/nodes_mips.h
+++ b/compiler/optimizing/nodes_mips.h
@@ -30,8 +30,8 @@ class HMipsComputeBaseMethodAddress : public HExpression<0> {
DECLARE_INSTRUCTION(MipsComputeBaseMethodAddress);
- private:
- DISALLOW_COPY_AND_ASSIGN(HMipsComputeBaseMethodAddress);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(MipsComputeBaseMethodAddress);
};
// Mips version of HPackedSwitch that holds a pointer to the base method address.
@@ -62,11 +62,12 @@ class HMipsPackedSwitch FINAL : public HTemplateInstruction<2> {
DECLARE_INSTRUCTION(MipsPackedSwitch);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(MipsPackedSwitch);
+
private:
const int32_t start_value_;
const int32_t num_entries_;
-
- DISALLOW_COPY_AND_ASSIGN(HMipsPackedSwitch);
};
// This instruction computes part of the array access offset (index offset).
@@ -105,8 +106,8 @@ class HIntermediateArrayAddressIndex FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(IntermediateArrayAddressIndex);
- private:
- DISALLOW_COPY_AND_ASSIGN(HIntermediateArrayAddressIndex);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(IntermediateArrayAddressIndex);
};
} // namespace art
diff --git a/compiler/optimizing/nodes_shared.h b/compiler/optimizing/nodes_shared.h
index 7b4f5f7cbb..e837f1e7e0 100644
--- a/compiler/optimizing/nodes_shared.h
+++ b/compiler/optimizing/nodes_shared.h
@@ -38,6 +38,8 @@ class HMultiplyAccumulate FINAL : public HExpression<3> {
SetRawInputAt(kInputMulRightIndex, mul_right);
}
+ bool IsClonable() const OVERRIDE { return true; }
+
static constexpr int kInputAccumulatorIndex = 0;
static constexpr int kInputMulLeftIndex = 1;
static constexpr int kInputMulRightIndex = 2;
@@ -51,11 +53,12 @@ class HMultiplyAccumulate FINAL : public HExpression<3> {
DECLARE_INSTRUCTION(MultiplyAccumulate);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(MultiplyAccumulate);
+
private:
// Indicates if this is a MADD or MSUB.
const InstructionKind op_kind_;
-
- DISALLOW_COPY_AND_ASSIGN(HMultiplyAccumulate);
};
class HBitwiseNegatedRight FINAL : public HBinaryOperation {
@@ -111,11 +114,12 @@ class HBitwiseNegatedRight FINAL : public HBinaryOperation {
DECLARE_INSTRUCTION(BitwiseNegatedRight);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(BitwiseNegatedRight);
+
private:
// Specifies the bitwise operation, which will be then negated.
const InstructionKind op_kind_;
-
- DISALLOW_COPY_AND_ASSIGN(HBitwiseNegatedRight);
};
// This instruction computes part of the array access offset (data and index offset).
@@ -145,6 +149,7 @@ class HIntermediateAddressIndex FINAL : public HExpression<3> {
SetRawInputAt(2, shift);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other ATTRIBUTE_UNUSED) const OVERRIDE {
return true;
@@ -157,8 +162,8 @@ class HIntermediateAddressIndex FINAL : public HExpression<3> {
DECLARE_INSTRUCTION(IntermediateAddressIndex);
- private:
- DISALLOW_COPY_AND_ASSIGN(HIntermediateAddressIndex);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(IntermediateAddressIndex);
};
class HDataProcWithShifterOp FINAL : public HExpression<2> {
@@ -198,6 +203,7 @@ class HDataProcWithShifterOp FINAL : public HExpression<2> {
SetRawInputAt(1, right);
}
+ bool IsClonable() const OVERRIDE { return true; }
bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(const HInstruction* other_instr) const OVERRIDE {
const HDataProcWithShifterOp* other = other_instr->AsDataProcWithShifterOp();
@@ -225,14 +231,15 @@ class HDataProcWithShifterOp FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(DataProcWithShifterOp);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(DataProcWithShifterOp);
+
private:
InstructionKind instr_kind_;
OpKind op_kind_;
int shift_amount_;
friend std::ostream& operator<<(std::ostream& os, OpKind op);
-
- DISALLOW_COPY_AND_ASSIGN(HDataProcWithShifterOp);
};
std::ostream& operator<<(std::ostream& os, const HDataProcWithShifterOp::OpKind op);
diff --git a/compiler/optimizing/nodes_vector.h b/compiler/optimizing/nodes_vector.h
index 17540b9770..59d5b9f847 100644
--- a/compiler/optimizing/nodes_vector.h
+++ b/compiler/optimizing/nodes_vector.h
@@ -161,10 +161,10 @@ class HVecOperation : public HVariableInputSizeInstruction {
static_assert(kNumberOfVectorOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
using TypeField = BitField<DataType::Type, kFieldType, kFieldTypeSize>;
+ DEFAULT_COPY_CONSTRUCTOR(VecOperation);
+
private:
const size_t vector_length_;
-
- DISALLOW_COPY_AND_ASSIGN(HVecOperation);
};
// Abstraction of a unary vector operation.
@@ -188,8 +188,8 @@ class HVecUnaryOperation : public HVecOperation {
DECLARE_ABSTRACT_INSTRUCTION(VecUnaryOperation);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecUnaryOperation);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecUnaryOperation);
};
// Abstraction of a binary vector operation.
@@ -216,8 +216,8 @@ class HVecBinaryOperation : public HVecOperation {
DECLARE_ABSTRACT_INSTRUCTION(VecBinaryOperation);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecBinaryOperation);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecBinaryOperation);
};
// Abstraction of a vector operation that references memory, with an alignment.
@@ -255,10 +255,11 @@ class HVecMemoryOperation : public HVecOperation {
DECLARE_ABSTRACT_INSTRUCTION(VecMemoryOperation);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecMemoryOperation);
+
private:
Alignment alignment_;
-
- DISALLOW_COPY_AND_ASSIGN(HVecMemoryOperation);
};
// Packed type consistency checker ("same vector length" integral types may mix freely).
@@ -296,8 +297,8 @@ class HVecReplicateScalar FINAL : public HVecUnaryOperation {
DECLARE_INSTRUCTION(VecReplicateScalar);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecReplicateScalar);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecReplicateScalar);
};
// Extracts a particular scalar from the given vector,
@@ -329,8 +330,8 @@ class HVecExtractScalar FINAL : public HVecUnaryOperation {
DECLARE_INSTRUCTION(VecExtractScalar);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecExtractScalar);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecExtractScalar);
};
// Reduces the given vector into the first element as sum/min/max,
@@ -367,10 +368,11 @@ class HVecReduce FINAL : public HVecUnaryOperation {
DECLARE_INSTRUCTION(VecReduce);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecReduce);
+
private:
const ReductionKind kind_;
-
- DISALLOW_COPY_AND_ASSIGN(HVecReduce);
};
// Converts every component in the vector,
@@ -394,8 +396,8 @@ class HVecCnv FINAL : public HVecUnaryOperation {
DECLARE_INSTRUCTION(VecCnv);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecCnv);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecCnv);
};
// Negates every component in the vector,
@@ -415,8 +417,8 @@ class HVecNeg FINAL : public HVecUnaryOperation {
DECLARE_INSTRUCTION(VecNeg);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecNeg);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecNeg);
};
// Takes absolute value of every component in the vector,
@@ -437,8 +439,8 @@ class HVecAbs FINAL : public HVecUnaryOperation {
DECLARE_INSTRUCTION(VecAbs);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecAbs);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecAbs);
};
// Bitwise- or boolean-nots every component in the vector,
@@ -459,8 +461,8 @@ class HVecNot FINAL : public HVecUnaryOperation {
DECLARE_INSTRUCTION(VecNot);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecNot);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecNot);
};
//
@@ -486,8 +488,8 @@ class HVecAdd FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecAdd);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecAdd);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecAdd);
};
// Performs halving add on every component in the two vectors, viz.
@@ -531,14 +533,15 @@ class HVecHalvingAdd FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecHalvingAdd);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecHalvingAdd);
+
private:
// Additional packed bits.
static constexpr size_t kFieldHAddIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
static constexpr size_t kFieldHAddIsRounded = kFieldHAddIsUnsigned + 1;
static constexpr size_t kNumberOfHAddPackedBits = kFieldHAddIsRounded + 1;
static_assert(kNumberOfHAddPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
-
- DISALLOW_COPY_AND_ASSIGN(HVecHalvingAdd);
};
// Subtracts every component in the two vectors,
@@ -560,8 +563,8 @@ class HVecSub FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecSub);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecSub);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecSub);
};
// Multiplies every component in the two vectors,
@@ -583,8 +586,8 @@ class HVecMul FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecMul);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecMul);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecMul);
};
// Divides every component in the two vectors,
@@ -606,8 +609,8 @@ class HVecDiv FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecDiv);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecDiv);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecDiv);
};
// Takes minimum of every component in the two vectors,
@@ -645,13 +648,14 @@ class HVecMin FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecMin);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecMin);
+
private:
// Additional packed bits.
static constexpr size_t kFieldMinOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
static constexpr size_t kNumberOfMinOpPackedBits = kFieldMinOpIsUnsigned + 1;
static_assert(kNumberOfMinOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
-
- DISALLOW_COPY_AND_ASSIGN(HVecMin);
};
// Takes maximum of every component in the two vectors,
@@ -689,13 +693,14 @@ class HVecMax FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecMax);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecMax);
+
private:
// Additional packed bits.
static constexpr size_t kFieldMaxOpIsUnsigned = HVecOperation::kNumberOfVectorOpPackedBits;
static constexpr size_t kNumberOfMaxOpPackedBits = kFieldMaxOpIsUnsigned + 1;
static_assert(kNumberOfMaxOpPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
-
- DISALLOW_COPY_AND_ASSIGN(HVecMax);
};
// Bitwise-ands every component in the two vectors,
@@ -716,8 +721,8 @@ class HVecAnd FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecAnd);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecAnd);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecAnd);
};
// Bitwise-and-nots every component in the two vectors,
@@ -738,8 +743,8 @@ class HVecAndNot FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecAndNot);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecAndNot);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecAndNot);
};
// Bitwise-ors every component in the two vectors,
@@ -760,8 +765,8 @@ class HVecOr FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecOr);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecOr);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecOr);
};
// Bitwise-xors every component in the two vectors,
@@ -782,8 +787,8 @@ class HVecXor FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecXor);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecXor);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecXor);
};
// Logically shifts every component in the vector left by the given distance,
@@ -804,8 +809,8 @@ class HVecShl FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecShl);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecShl);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecShl);
};
// Arithmetically shifts every component in the vector right by the given distance,
@@ -826,8 +831,8 @@ class HVecShr FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecShr);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecShr);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecShr);
};
// Logically shifts every component in the vector right by the given distance,
@@ -848,8 +853,8 @@ class HVecUShr FINAL : public HVecBinaryOperation {
DECLARE_INSTRUCTION(VecUShr);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecUShr);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecUShr);
};
//
@@ -885,8 +890,8 @@ class HVecSetScalars FINAL : public HVecOperation {
DECLARE_INSTRUCTION(VecSetScalars);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecSetScalars);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecSetScalars);
};
// Multiplies every component in the two vectors, adds the result vector to the accumulator vector,
@@ -929,11 +934,12 @@ class HVecMultiplyAccumulate FINAL : public HVecOperation {
DECLARE_INSTRUCTION(VecMultiplyAccumulate);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecMultiplyAccumulate);
+
private:
// Indicates if this is a MADD or MSUB.
const InstructionKind op_kind_;
-
- DISALLOW_COPY_AND_ASSIGN(HVecMultiplyAccumulate);
};
// Takes the absolute difference of two vectors, and adds the results to
@@ -968,8 +974,8 @@ class HVecSADAccumulate FINAL : public HVecOperation {
DECLARE_INSTRUCTION(VecSADAccumulate);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecSADAccumulate);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecSADAccumulate);
};
// Loads a vector from memory, viz. load(mem, 1)
@@ -1007,13 +1013,14 @@ class HVecLoad FINAL : public HVecMemoryOperation {
DECLARE_INSTRUCTION(VecLoad);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecLoad);
+
private:
// Additional packed bits.
static constexpr size_t kFieldIsStringCharAt = HVecOperation::kNumberOfVectorOpPackedBits;
static constexpr size_t kNumberOfVecLoadPackedBits = kFieldIsStringCharAt + 1;
static_assert(kNumberOfVecLoadPackedBits <= kMaxNumberOfPackedBits, "Too many packed fields.");
-
- DISALLOW_COPY_AND_ASSIGN(HVecLoad);
};
// Stores a vector to memory, viz. store(m, 1, [x1, .. , xn] )
@@ -1045,8 +1052,8 @@ class HVecStore FINAL : public HVecMemoryOperation {
DECLARE_INSTRUCTION(VecStore);
- private:
- DISALLOW_COPY_AND_ASSIGN(HVecStore);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(VecStore);
};
} // namespace art
diff --git a/compiler/optimizing/nodes_x86.h b/compiler/optimizing/nodes_x86.h
index 22e92eab31..6326065fe2 100644
--- a/compiler/optimizing/nodes_x86.h
+++ b/compiler/optimizing/nodes_x86.h
@@ -30,8 +30,8 @@ class HX86ComputeBaseMethodAddress FINAL : public HExpression<0> {
DECLARE_INSTRUCTION(X86ComputeBaseMethodAddress);
- private:
- DISALLOW_COPY_AND_ASSIGN(HX86ComputeBaseMethodAddress);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(X86ComputeBaseMethodAddress);
};
// Load a constant value from the constant table.
@@ -54,8 +54,8 @@ class HX86LoadFromConstantTable FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(X86LoadFromConstantTable);
- private:
- DISALLOW_COPY_AND_ASSIGN(HX86LoadFromConstantTable);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(X86LoadFromConstantTable);
};
// Version of HNeg with access to the constant table for FP types.
@@ -77,8 +77,8 @@ class HX86FPNeg FINAL : public HExpression<2> {
DECLARE_INSTRUCTION(X86FPNeg);
- private:
- DISALLOW_COPY_AND_ASSIGN(HX86FPNeg);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(X86FPNeg);
};
// X86 version of HPackedSwitch that holds a pointer to the base method address.
@@ -113,11 +113,12 @@ class HX86PackedSwitch FINAL : public HTemplateInstruction<2> {
DECLARE_INSTRUCTION(X86PackedSwitch);
+ protected:
+ DEFAULT_COPY_CONSTRUCTOR(X86PackedSwitch);
+
private:
const int32_t start_value_;
const int32_t num_entries_;
-
- DISALLOW_COPY_AND_ASSIGN(HX86PackedSwitch);
};
} // namespace art
diff --git a/compiler/optimizing/optimization.cc b/compiler/optimizing/optimization.cc
index 1e68ca2802..7edb642c5b 100644
--- a/compiler/optimizing/optimization.cc
+++ b/compiler/optimizing/optimization.cc
@@ -16,5 +16,317 @@
#include "optimization.h"
+#ifdef ART_ENABLE_CODEGEN_arm
+#include "instruction_simplifier_arm.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
+#include "instruction_simplifier_arm64.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+#include "instruction_simplifier_mips.h"
+#include "pc_relative_fixups_mips.h"
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+#include "pc_relative_fixups_x86.h"
+#endif
+#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
+#include "x86_memory_gen.h"
+#endif
+
+#include "bounds_check_elimination.h"
+#include "cha_guard_optimization.h"
+#include "code_sinking.h"
+#include "constant_folding.h"
+#include "constructor_fence_redundancy_elimination.h"
+#include "dead_code_elimination.h"
+#include "driver/dex_compilation_unit.h"
+#include "gvn.h"
+#include "induction_var_analysis.h"
+#include "inliner.h"
+#include "instruction_simplifier.h"
+#include "intrinsics.h"
+#include "licm.h"
+#include "load_store_analysis.h"
+#include "load_store_elimination.h"
+#include "loop_optimization.h"
+#include "scheduler.h"
+#include "select_generator.h"
+#include "sharpening.h"
+#include "side_effects_analysis.h"
+
namespace art {
+
+const char* OptimizationPassName(OptimizationPass pass) {
+ switch (pass) {
+ case OptimizationPass::kSideEffectsAnalysis:
+ return SideEffectsAnalysis::kSideEffectsAnalysisPassName;
+ case OptimizationPass::kInductionVarAnalysis:
+ return HInductionVarAnalysis::kInductionPassName;
+ case OptimizationPass::kLoadStoreAnalysis:
+ return LoadStoreAnalysis::kLoadStoreAnalysisPassName;
+ case OptimizationPass::kGlobalValueNumbering:
+ return GVNOptimization::kGlobalValueNumberingPassName;
+ case OptimizationPass::kInvariantCodeMotion:
+ return LICM::kLoopInvariantCodeMotionPassName;
+ case OptimizationPass::kLoopOptimization:
+ return HLoopOptimization::kLoopOptimizationPassName;
+ case OptimizationPass::kBoundsCheckElimination:
+ return BoundsCheckElimination::kBoundsCheckEliminationPassName;
+ case OptimizationPass::kLoadStoreElimination:
+ return LoadStoreElimination::kLoadStoreEliminationPassName;
+ case OptimizationPass::kConstantFolding:
+ return HConstantFolding::kConstantFoldingPassName;
+ case OptimizationPass::kDeadCodeElimination:
+ return HDeadCodeElimination::kDeadCodeEliminationPassName;
+ case OptimizationPass::kInliner:
+ return HInliner::kInlinerPassName;
+ case OptimizationPass::kSharpening:
+ return HSharpening::kSharpeningPassName;
+ case OptimizationPass::kSelectGenerator:
+ return HSelectGenerator::kSelectGeneratorPassName;
+ case OptimizationPass::kInstructionSimplifier:
+ return InstructionSimplifier::kInstructionSimplifierPassName;
+ case OptimizationPass::kIntrinsicsRecognizer:
+ return IntrinsicsRecognizer::kIntrinsicsRecognizerPassName;
+ case OptimizationPass::kCHAGuardOptimization:
+ return CHAGuardOptimization::kCHAGuardOptimizationPassName;
+ case OptimizationPass::kCodeSinking:
+ return CodeSinking::kCodeSinkingPassName;
+ case OptimizationPass::kConstructorFenceRedundancyElimination:
+ return ConstructorFenceRedundancyElimination::kCFREPassName;
+ case OptimizationPass::kScheduling:
+ return HInstructionScheduling::kInstructionSchedulingPassName;
+#ifdef ART_ENABLE_CODEGEN_arm
+ case OptimizationPass::kInstructionSimplifierArm:
+ return arm::InstructionSimplifierArm::kInstructionSimplifierArmPassName;
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
+ case OptimizationPass::kInstructionSimplifierArm64:
+ return arm64::InstructionSimplifierArm64::kInstructionSimplifierArm64PassName;
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+ case OptimizationPass::kPcRelativeFixupsMips:
+ return mips::PcRelativeFixups::kPcRelativeFixupsMipsPassName;
+ case OptimizationPass::kInstructionSimplifierMips:
+ return mips::InstructionSimplifierMips::kInstructionSimplifierMipsPassName;
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+ case OptimizationPass::kPcRelativeFixupsX86:
+ return x86::PcRelativeFixups::kPcRelativeFixupsX86PassName;
+#endif
+#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
+ case OptimizationPass::kX86MemoryOperandGeneration:
+ return x86::X86MemoryOperandGeneration::kX86MemoryOperandGenerationPassName;
+#endif
+ }
+}
+
+#define X(x) if (name == OptimizationPassName((x))) return (x)
+
+OptimizationPass OptimizationPassByName(const std::string& name) {
+ X(OptimizationPass::kBoundsCheckElimination);
+ X(OptimizationPass::kCHAGuardOptimization);
+ X(OptimizationPass::kCodeSinking);
+ X(OptimizationPass::kConstantFolding);
+ X(OptimizationPass::kConstructorFenceRedundancyElimination);
+ X(OptimizationPass::kDeadCodeElimination);
+ X(OptimizationPass::kGlobalValueNumbering);
+ X(OptimizationPass::kInductionVarAnalysis);
+ X(OptimizationPass::kInliner);
+ X(OptimizationPass::kInstructionSimplifier);
+ X(OptimizationPass::kIntrinsicsRecognizer);
+ X(OptimizationPass::kInvariantCodeMotion);
+ X(OptimizationPass::kLoadStoreAnalysis);
+ X(OptimizationPass::kLoadStoreElimination);
+ X(OptimizationPass::kLoopOptimization);
+ X(OptimizationPass::kScheduling);
+ X(OptimizationPass::kSelectGenerator);
+ X(OptimizationPass::kSharpening);
+ X(OptimizationPass::kSideEffectsAnalysis);
+#ifdef ART_ENABLE_CODEGEN_arm
+ X(OptimizationPass::kInstructionSimplifierArm);
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
+ X(OptimizationPass::kInstructionSimplifierArm64);
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+ X(OptimizationPass::kPcRelativeFixupsMips);
+ X(OptimizationPass::kInstructionSimplifierMips);
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+ X(OptimizationPass::kPcRelativeFixupsX86);
+ X(OptimizationPass::kX86MemoryOperandGeneration);
+#endif
+ LOG(FATAL) << "Cannot find optimization " << name;
+ UNREACHABLE();
+}
+
+#undef X
+
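The X macro above expands to one string comparison per enum value, giving a reverse lookup that reuses the forward mapping instead of repeating every string literal. A minimal standalone sketch of the same idiom, using a hypothetical Color enum rather than ART's pass list:

    #include <cstdlib>
    #include <iostream>
    #include <string>

    enum class Color { kRed, kGreen };

    const char* ColorName(Color c) {
      switch (c) {
        case Color::kRed:   return "red";
        case Color::kGreen: return "green";
      }
      return nullptr;  // Unreachable for valid enum values.
    }

    // One comparison per value, reusing the forward mapping above.
    #define X(c) if (name == ColorName((c))) return (c)

    Color ColorByName(const std::string& name) {
      X(Color::kRed);
      X(Color::kGreen);
      std::cerr << "Cannot find color " << name << std::endl;
      std::abort();
    }

    #undef X

    int main() {
      std::cout << (ColorByName("green") == Color::kGreen) << std::endl;  // prints 1
      return 0;
    }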
+ArenaVector<HOptimization*> ConstructOptimizations(
+ const OptimizationDef definitions[],
+ size_t length,
+ ArenaAllocator* allocator,
+ HGraph* graph,
+ OptimizingCompilerStats* stats,
+ CodeGenerator* codegen,
+ CompilerDriver* driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ VariableSizedHandleScope* handles) {
+ ArenaVector<HOptimization*> optimizations(allocator->Adapter());
+
+  // Some optimizations require SideEffectsAnalysis or HInductionVarAnalysis
+  // instances. This method uses the nearest such instance preceding the
+  // consuming pass in the definition list, or fails fatally if none is found.
+ SideEffectsAnalysis* most_recent_side_effects = nullptr;
+ HInductionVarAnalysis* most_recent_induction = nullptr;
+ LoadStoreAnalysis* most_recent_lsa = nullptr;
+
+ // Loop over the requested optimizations.
+ for (size_t i = 0; i < length; i++) {
+ OptimizationPass pass = definitions[i].first;
+    // Decide between default or alternative pass name.
+    const char* alt_name = definitions[i].second;
+ const char* name = alt_name != nullptr
+ ? alt_name
+ : OptimizationPassName(pass);
+ HOptimization* opt = nullptr;
+
+ switch (pass) {
+ //
+ // Analysis passes (kept in most recent for subsequent passes).
+ //
+ case OptimizationPass::kSideEffectsAnalysis:
+ opt = most_recent_side_effects = new (allocator) SideEffectsAnalysis(graph, name);
+ break;
+ case OptimizationPass::kInductionVarAnalysis:
+ opt = most_recent_induction = new (allocator) HInductionVarAnalysis(graph, name);
+ break;
+ case OptimizationPass::kLoadStoreAnalysis:
+ opt = most_recent_lsa = new (allocator) LoadStoreAnalysis(graph, name);
+ break;
+ //
+ // Passes that need prior analysis.
+ //
+ case OptimizationPass::kGlobalValueNumbering:
+ CHECK(most_recent_side_effects != nullptr);
+ opt = new (allocator) GVNOptimization(graph, *most_recent_side_effects, name);
+ break;
+ case OptimizationPass::kInvariantCodeMotion:
+ CHECK(most_recent_side_effects != nullptr);
+ opt = new (allocator) LICM(graph, *most_recent_side_effects, stats, name);
+ break;
+ case OptimizationPass::kLoopOptimization:
+ CHECK(most_recent_induction != nullptr);
+ opt = new (allocator) HLoopOptimization(graph, driver, most_recent_induction, stats, name);
+ break;
+ case OptimizationPass::kBoundsCheckElimination:
+ CHECK(most_recent_side_effects != nullptr && most_recent_induction != nullptr);
+ opt = new (allocator) BoundsCheckElimination(
+ graph, *most_recent_side_effects, most_recent_induction, name);
+ break;
+ case OptimizationPass::kLoadStoreElimination:
+ CHECK(most_recent_side_effects != nullptr && most_recent_induction != nullptr);
+ opt = new (allocator) LoadStoreElimination(
+ graph, *most_recent_side_effects, *most_recent_lsa, stats, name);
+ break;
+ //
+ // Regular passes.
+ //
+ case OptimizationPass::kConstantFolding:
+ opt = new (allocator) HConstantFolding(graph, name);
+ break;
+ case OptimizationPass::kDeadCodeElimination:
+ opt = new (allocator) HDeadCodeElimination(graph, stats, name);
+ break;
+ case OptimizationPass::kInliner: {
+ size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
+ opt = new (allocator) HInliner(graph, // outer_graph
+ graph, // outermost_graph
+ codegen,
+ dex_compilation_unit, // outer_compilation_unit
+ dex_compilation_unit, // outermost_compilation_unit
+ driver,
+ handles,
+ stats,
+ number_of_dex_registers,
+ /* total_number_of_instructions */ 0,
+ /* parent */ nullptr,
+ /* depth */ 0,
+ name);
+ break;
+ }
+ case OptimizationPass::kSharpening:
+ opt = new (allocator) HSharpening(
+ graph, codegen, dex_compilation_unit, driver, handles, name);
+ break;
+ case OptimizationPass::kSelectGenerator:
+ opt = new (allocator) HSelectGenerator(graph, handles, stats, name);
+ break;
+ case OptimizationPass::kInstructionSimplifier:
+ opt = new (allocator) InstructionSimplifier(graph, codegen, driver, stats, name);
+ break;
+ case OptimizationPass::kIntrinsicsRecognizer:
+ opt = new (allocator) IntrinsicsRecognizer(graph, stats, name);
+ break;
+ case OptimizationPass::kCHAGuardOptimization:
+ opt = new (allocator) CHAGuardOptimization(graph, name);
+ break;
+ case OptimizationPass::kCodeSinking:
+ opt = new (allocator) CodeSinking(graph, stats, name);
+ break;
+ case OptimizationPass::kConstructorFenceRedundancyElimination:
+ opt = new (allocator) ConstructorFenceRedundancyElimination(graph, stats, name);
+ break;
+ case OptimizationPass::kScheduling:
+ opt = new (allocator) HInstructionScheduling(
+ graph, driver->GetInstructionSet(), codegen, name);
+ break;
+ //
+ // Arch-specific passes.
+ //
+#ifdef ART_ENABLE_CODEGEN_arm
+ case OptimizationPass::kInstructionSimplifierArm:
+ DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
+ opt = new (allocator) arm::InstructionSimplifierArm(graph, stats);
+ break;
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
+ case OptimizationPass::kInstructionSimplifierArm64:
+ DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
+ opt = new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
+ break;
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+ case OptimizationPass::kPcRelativeFixupsMips:
+ DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
+ opt = new (allocator) mips::PcRelativeFixups(graph, codegen, stats);
+ break;
+ case OptimizationPass::kInstructionSimplifierMips:
+ DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
+ opt = new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats);
+ break;
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+ case OptimizationPass::kPcRelativeFixupsX86:
+ DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
+ opt = new (allocator) x86::PcRelativeFixups(graph, codegen, stats);
+ break;
+ case OptimizationPass::kX86MemoryOperandGeneration:
+ DCHECK(alt_name == nullptr) << "arch-specific pass does not support alternative name";
+ opt = new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
+ break;
+#endif
+ } // switch
+
+    // Add the newly constructed optimization to the result vector.
+ CHECK(opt != nullptr);
+ DCHECK_STREQ(name, opt->GetPassName()); // sanity
+ optimizations.push_back(opt);
+ }
+
+ return optimizations;
+}
+
} // namespace art
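For illustration, a hedged sketch of how a caller might drive this helper; the surrounding objects (allocator, graph, stats, codegen, driver, unit, handles) are assumed to be set up as in OptimizingCompiler::TryCompile and are not shown:

    // Analyses must precede their consumers: GVN picks up the most recent
    // SideEffectsAnalysis constructed before it in the list.
    OptimizationDef defs[] = {
        OptDef(OptimizationPass::kSideEffectsAnalysis),
        OptDef(OptimizationPass::kGlobalValueNumbering),
        OptDef(OptimizationPass::kDeadCodeElimination, "dead_code_elimination$final"),
    };
    ArenaVector<HOptimization*> opts = ConstructOptimizations(
        defs, arraysize(defs), allocator, graph, stats, codegen, driver, unit, handles);
    for (HOptimization* opt : opts) {
      opt->Run();  // Reports under the default or alternative pass name.
    }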
diff --git a/compiler/optimizing/optimization.h b/compiler/optimizing/optimization.h
index ce41a2e512..c170f155fa 100644
--- a/compiler/optimizing/optimization.h
+++ b/compiler/optimizing/optimization.h
@@ -23,6 +23,10 @@
namespace art {
+class CodeGenerator;
+class CompilerDriver;
+class DexCompilationUnit;
+
/**
* Abstraction to implement an optimization pass.
*/
@@ -58,6 +62,81 @@ class HOptimization : public ArenaObject<kArenaAllocOptimization> {
DISALLOW_COPY_AND_ASSIGN(HOptimization);
};
+// Optimization passes that can be constructed by the helper method below. An enum
+// value is preferred over a string lookup at places where performance matters.
+// TODO: generate this table and lookup methods below automatically?
+enum class OptimizationPass {
+ kBoundsCheckElimination,
+ kCHAGuardOptimization,
+ kCodeSinking,
+ kConstantFolding,
+ kConstructorFenceRedundancyElimination,
+ kDeadCodeElimination,
+ kGlobalValueNumbering,
+ kInductionVarAnalysis,
+ kInliner,
+ kInstructionSimplifier,
+ kIntrinsicsRecognizer,
+ kInvariantCodeMotion,
+ kLoadStoreAnalysis,
+ kLoadStoreElimination,
+ kLoopOptimization,
+ kScheduling,
+ kSelectGenerator,
+ kSharpening,
+ kSideEffectsAnalysis,
+#ifdef ART_ENABLE_CODEGEN_arm
+ kInstructionSimplifierArm,
+#endif
+#ifdef ART_ENABLE_CODEGEN_arm64
+ kInstructionSimplifierArm64,
+#endif
+#ifdef ART_ENABLE_CODEGEN_mips
+ kPcRelativeFixupsMips,
+ kInstructionSimplifierMips,
+#endif
+#ifdef ART_ENABLE_CODEGEN_x86
+ kPcRelativeFixupsX86,
+#endif
+#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
+ kX86MemoryOperandGeneration,
+#endif
+};
+
+// Lookup name of optimization pass.
+const char* OptimizationPassName(OptimizationPass pass);
+
+// Lookup optimization pass by name.
+OptimizationPass OptimizationPassByName(const std::string& name);
+
+// Optimization definition consisting of an optimization pass
+// and an optional alternative name (nullptr denotes default).
+typedef std::pair<OptimizationPass, const char*> OptimizationDef;
+
+// Helper method for optimization definition array entries.
+inline OptimizationDef OptDef(OptimizationPass pass, const char* name = nullptr) {
+ return std::make_pair(pass, name);
+}
+
+// Helper method to construct a series of optimization passes.
+// The array should consist of the requested optimizations
+// and optional alternative names for repeated passes.
+// Example:
+//    { OptDef(OptimizationPass::kConstantFolding),
+//      OptDef(OptimizationPass::kInliner),
+//      OptDef(OptimizationPass::kConstantFolding, "constant_folding$after_inlining")
+//    }
+ArenaVector<HOptimization*> ConstructOptimizations(
+ const OptimizationDef definitions[],
+ size_t length,
+ ArenaAllocator* allocator,
+ HGraph* graph,
+ OptimizingCompilerStats* stats,
+ CodeGenerator* codegen,
+ CompilerDriver* driver,
+ const DexCompilationUnit& dex_compilation_unit,
+ VariableSizedHandleScope* handles);
+
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_OPTIMIZATION_H_
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 4974ed0ec5..73c72fc57a 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -22,23 +22,6 @@
#include <stdint.h>
-#ifdef ART_ENABLE_CODEGEN_arm64
-#include "instruction_simplifier_arm64.h"
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_mips
-#include "instruction_simplifier_mips.h"
-#include "pc_relative_fixups_mips.h"
-#endif
-
-#ifdef ART_ENABLE_CODEGEN_x86
-#include "pc_relative_fixups_x86.h"
-#endif
-
-#if defined(ART_ENABLE_CODEGEN_x86) || defined(ART_ENABLE_CODEGEN_x86_64)
-#include "x86_memory_gen.h"
-#endif
-
#include "art_method-inl.h"
#include "base/arena_allocator.h"
#include "base/arena_containers.h"
@@ -47,16 +30,10 @@
#include "base/mutex.h"
#include "base/scoped_arena_allocator.h"
#include "base/timing_logger.h"
-#include "bounds_check_elimination.h"
#include "builder.h"
-#include "cha_guard_optimization.h"
#include "code_generator.h"
-#include "code_sinking.h"
#include "compiled_method.h"
#include "compiler.h"
-#include "constant_folding.h"
-#include "constructor_fence_redundancy_elimination.h"
-#include "dead_code_elimination.h"
#include "debug/elf_debug_writer.h"
#include "debug/method_debug_info.h"
#include "dex/verification_results.h"
@@ -67,31 +44,19 @@
#include "driver/dex_compilation_unit.h"
#include "graph_checker.h"
#include "graph_visualizer.h"
-#include "gvn.h"
-#include "induction_var_analysis.h"
#include "inliner.h"
-#include "instruction_simplifier.h"
-#include "instruction_simplifier_arm.h"
-#include "intrinsics.h"
#include "jit/debugger_interface.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "jit/jit_logger.h"
#include "jni/quick/jni_compiler.h"
-#include "licm.h"
#include "linker/linker_patch.h"
-#include "load_store_analysis.h"
-#include "load_store_elimination.h"
-#include "loop_optimization.h"
#include "nodes.h"
#include "oat_quick_method_header.h"
#include "prepare_for_register_allocation.h"
#include "reference_type_propagation.h"
#include "register_allocator_linear_scan.h"
-#include "scheduler.h"
#include "select_generator.h"
-#include "sharpening.h"
-#include "side_effects_analysis.h"
#include "ssa_builder.h"
#include "ssa_liveness_analysis.h"
#include "ssa_phi_elimination.h"
@@ -147,7 +112,7 @@ class PassObserver : public ValueObject {
Mutex& dump_mutex)
: graph_(graph),
cached_method_name_(),
- timing_logger_enabled_(compiler_driver->GetDumpPasses()),
+ timing_logger_enabled_(compiler_driver->GetCompilerOptions().GetDumpTimings()),
timing_logger_(timing_logger_enabled_ ? GetMethodName() : "", true, true),
disasm_info_(graph->GetAllocator()),
visualizer_oss_(),
@@ -335,21 +300,52 @@ class OptimizingCompiler FINAL : public Compiler {
private:
void RunOptimizations(HGraph* graph,
CodeGenerator* codegen,
- CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer,
- VariableSizedHandleScope* handles) const;
+ VariableSizedHandleScope* handles,
+ const OptimizationDef definitions[],
+ size_t length) const {
+ // Convert definitions to optimization passes.
+ ArenaVector<HOptimization*> optimizations = ConstructOptimizations(
+ definitions,
+ length,
+ graph->GetAllocator(),
+ graph,
+ compilation_stats_.get(),
+ codegen,
+ GetCompilerDriver(),
+ dex_compilation_unit,
+ handles);
+ DCHECK_EQ(length, optimizations.size());
+ // Run the optimization passes one by one.
+ for (size_t i = 0; i < length; ++i) {
+ PassScope scope(optimizations[i]->GetPassName(), pass_observer);
+ optimizations[i]->Run();
+ }
+ }
- void RunOptimizations(HOptimization* optimizations[],
- size_t length,
- PassObserver* pass_observer) const;
+ template <size_t length> void RunOptimizations(
+ HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ VariableSizedHandleScope* handles,
+ const OptimizationDef (&definitions)[length]) const {
+ RunOptimizations(
+ graph, codegen, dex_compilation_unit, pass_observer, handles, definitions, length);
+ }
+
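The template overload above takes a reference-to-array parameter, so the compiler deduces `length` from the definition array and call sites need no explicit arraysize. The deduction idiom in isolation (hypothetical Sum helper, not ART code):

    #include <cstddef>
    #include <iostream>

    // `values` binds to arrays only; `length` is deduced at compile time,
    // so the array cannot silently decay to a pointer of unknown size.
    template <size_t length>
    int Sum(const int (&values)[length]) {
      int total = 0;
      for (size_t i = 0; i < length; ++i) {
        total += values[i];
      }
      return total;
    }

    int main() {
      int samples[] = {1, 2, 3};               // length 3 deduced
      std::cout << Sum(samples) << std::endl;  // prints 6
      return 0;
    }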
+ void RunOptimizations(HGraph* graph,
+ CodeGenerator* codegen,
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ VariableSizedHandleScope* handles) const;
private:
// Create a 'CompiledMethod' for an optimized graph.
CompiledMethod* Emit(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
CodeGenerator* codegen,
- CompilerDriver* driver,
const DexFile::CodeItem* item) const;
// Try compiling a method and return the code generator used for
@@ -376,15 +372,15 @@ class OptimizingCompiler FINAL : public Compiler {
void MaybeRunInliner(HGraph* graph,
CodeGenerator* codegen,
- CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer,
VariableSizedHandleScope* handles) const;
- void RunArchOptimizations(InstructionSet instruction_set,
- HGraph* graph,
+ void RunArchOptimizations(HGraph* graph,
CodeGenerator* codegen,
- PassObserver* pass_observer) const;
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ VariableSizedHandleScope* handles) const;
std::unique_ptr<OptimizingCompilerStats> compilation_stats_;
@@ -411,7 +407,7 @@ void OptimizingCompiler::Init() {
driver->GetCompilerOptions().GetDumpCfgAppend() ? std::ofstream::app : std::ofstream::out;
visualizer_output_.reset(new std::ofstream(cfg_file_name, cfg_file_mode));
}
- if (driver->GetDumpStats()) {
+ if (driver->GetCompilerOptions().GetDumpStats()) {
compilation_stats_.reset(new OptimizingCompilerStats());
}
}
@@ -440,299 +436,130 @@ static bool IsInstructionSetSupported(InstructionSet instruction_set) {
|| instruction_set == InstructionSet::kX86_64;
}
-// Strip pass name suffix to get optimization name.
-static std::string ConvertPassNameToOptimizationName(const std::string& pass_name) {
- size_t pos = pass_name.find(kPassNameSeparator);
- return pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
-}
-
-static HOptimization* BuildOptimization(
- const std::string& pass_name,
- ArenaAllocator* allocator,
- HGraph* graph,
- OptimizingCompilerStats* stats,
- CodeGenerator* codegen,
- CompilerDriver* driver,
- const DexCompilationUnit& dex_compilation_unit,
- VariableSizedHandleScope* handles,
- SideEffectsAnalysis* most_recent_side_effects,
- HInductionVarAnalysis* most_recent_induction,
- LoadStoreAnalysis* most_recent_lsa) {
- std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
- if (opt_name == BoundsCheckElimination::kBoundsCheckEliminationPassName) {
- CHECK(most_recent_side_effects != nullptr && most_recent_induction != nullptr);
- return new (allocator) BoundsCheckElimination(graph,
- *most_recent_side_effects,
- most_recent_induction);
- } else if (opt_name == GVNOptimization::kGlobalValueNumberingPassName) {
- CHECK(most_recent_side_effects != nullptr);
- return new (allocator) GVNOptimization(graph, *most_recent_side_effects, pass_name.c_str());
- } else if (opt_name == HConstantFolding::kConstantFoldingPassName) {
- return new (allocator) HConstantFolding(graph, pass_name.c_str());
- } else if (opt_name == HDeadCodeElimination::kDeadCodeEliminationPassName) {
- return new (allocator) HDeadCodeElimination(graph, stats, pass_name.c_str());
- } else if (opt_name == HInliner::kInlinerPassName) {
- size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
- return new (allocator) HInliner(graph, // outer_graph
- graph, // outermost_graph
- codegen,
- dex_compilation_unit, // outer_compilation_unit
- dex_compilation_unit, // outermost_compilation_unit
- driver,
- handles,
- stats,
- number_of_dex_registers,
- /* total_number_of_instructions */ 0,
- /* parent */ nullptr);
- } else if (opt_name == HSharpening::kSharpeningPassName) {
- return new (allocator) HSharpening(graph, codegen, dex_compilation_unit, driver, handles);
- } else if (opt_name == HSelectGenerator::kSelectGeneratorPassName) {
- return new (allocator) HSelectGenerator(graph, handles, stats);
- } else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
- return new (allocator) HInductionVarAnalysis(graph);
- } else if (opt_name == InstructionSimplifier::kInstructionSimplifierPassName) {
- return new (allocator) InstructionSimplifier(graph, codegen, driver, stats, pass_name.c_str());
- } else if (opt_name == IntrinsicsRecognizer::kIntrinsicsRecognizerPassName) {
- return new (allocator) IntrinsicsRecognizer(graph, stats);
- } else if (opt_name == LICM::kLoopInvariantCodeMotionPassName) {
- CHECK(most_recent_side_effects != nullptr);
- return new (allocator) LICM(graph, *most_recent_side_effects, stats);
- } else if (opt_name == LoadStoreAnalysis::kLoadStoreAnalysisPassName) {
- return new (allocator) LoadStoreAnalysis(graph);
- } else if (opt_name == LoadStoreElimination::kLoadStoreEliminationPassName) {
- CHECK(most_recent_side_effects != nullptr);
- CHECK(most_recent_lsa != nullptr);
- return new (allocator) LoadStoreElimination(graph,
- *most_recent_side_effects,
- *most_recent_lsa, stats);
- } else if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
- return new (allocator) SideEffectsAnalysis(graph);
- } else if (opt_name == HLoopOptimization::kLoopOptimizationPassName) {
- return new (allocator) HLoopOptimization(graph, driver, most_recent_induction, stats);
- } else if (opt_name == CHAGuardOptimization::kCHAGuardOptimizationPassName) {
- return new (allocator) CHAGuardOptimization(graph);
- } else if (opt_name == CodeSinking::kCodeSinkingPassName) {
- return new (allocator) CodeSinking(graph, stats);
- } else if (opt_name == ConstructorFenceRedundancyElimination::kPassName) {
- return new (allocator) ConstructorFenceRedundancyElimination(graph, stats);
-#ifdef ART_ENABLE_CODEGEN_arm
- } else if (opt_name == arm::InstructionSimplifierArm::kInstructionSimplifierArmPassName) {
- return new (allocator) arm::InstructionSimplifierArm(graph, stats);
-#endif
-#ifdef ART_ENABLE_CODEGEN_arm64
- } else if (opt_name == arm64::InstructionSimplifierArm64::kInstructionSimplifierArm64PassName) {
- return new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
-#endif
-#ifdef ART_ENABLE_CODEGEN_mips
- } else if (opt_name == mips::PcRelativeFixups::kPcRelativeFixupsMipsPassName) {
- return new (allocator) mips::PcRelativeFixups(graph, codegen, stats);
- } else if (opt_name == mips::InstructionSimplifierMips::kInstructionSimplifierMipsPassName) {
- return new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats);
-#endif
-#ifdef ART_ENABLE_CODEGEN_x86
- } else if (opt_name == x86::PcRelativeFixups::kPcRelativeFixupsX86PassName) {
- return new (allocator) x86::PcRelativeFixups(graph, codegen, stats);
- } else if (opt_name == x86::X86MemoryOperandGeneration::kX86MemoryOperandGenerationPassName) {
- return new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
-#endif
- }
- return nullptr;
-}
-
-static ArenaVector<HOptimization*> BuildOptimizations(
- const std::vector<std::string>& pass_names,
- ArenaAllocator* allocator,
- HGraph* graph,
- OptimizingCompilerStats* stats,
- CodeGenerator* codegen,
- CompilerDriver* driver,
- const DexCompilationUnit& dex_compilation_unit,
- VariableSizedHandleScope* handles) {
- // Few HOptimizations constructors require SideEffectsAnalysis or HInductionVarAnalysis
- // instances. This method assumes that each of them expects the nearest instance preceeding it
- // in the pass name list.
- SideEffectsAnalysis* most_recent_side_effects = nullptr;
- HInductionVarAnalysis* most_recent_induction = nullptr;
- LoadStoreAnalysis* most_recent_lsa = nullptr;
- ArenaVector<HOptimization*> ret(allocator->Adapter());
- for (const std::string& pass_name : pass_names) {
- HOptimization* opt = BuildOptimization(
- pass_name,
- allocator,
- graph,
- stats,
- codegen,
- driver,
- dex_compilation_unit,
- handles,
- most_recent_side_effects,
- most_recent_induction,
- most_recent_lsa);
- CHECK(opt != nullptr) << "Couldn't build optimization: \"" << pass_name << "\"";
- ret.push_back(opt);
-
- std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
- if (opt_name == SideEffectsAnalysis::kSideEffectsAnalysisPassName) {
- most_recent_side_effects = down_cast<SideEffectsAnalysis*>(opt);
- } else if (opt_name == HInductionVarAnalysis::kInductionPassName) {
- most_recent_induction = down_cast<HInductionVarAnalysis*>(opt);
- } else if (opt_name == LoadStoreAnalysis::kLoadStoreAnalysisPassName) {
- most_recent_lsa = down_cast<LoadStoreAnalysis*>(opt);
- }
- }
- return ret;
-}
-
-void OptimizingCompiler::RunOptimizations(HOptimization* optimizations[],
- size_t length,
- PassObserver* pass_observer) const {
- for (size_t i = 0; i < length; ++i) {
- PassScope scope(optimizations[i]->GetPassName(), pass_observer);
- optimizations[i]->Run();
- }
-}
-
void OptimizingCompiler::MaybeRunInliner(HGraph* graph,
CodeGenerator* codegen,
- CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer,
VariableSizedHandleScope* handles) const {
- OptimizingCompilerStats* stats = compilation_stats_.get();
- const CompilerOptions& compiler_options = driver->GetCompilerOptions();
+ const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
bool should_inline = (compiler_options.GetInlineMaxCodeUnits() > 0);
if (!should_inline) {
return;
}
- size_t number_of_dex_registers = dex_compilation_unit.GetCodeItem()->registers_size_;
- HInliner* inliner = new (graph->GetAllocator()) HInliner(
- graph, // outer_graph
- graph, // outermost_graph
- codegen,
- dex_compilation_unit, // outer_compilation_unit
- dex_compilation_unit, // outermost_compilation_unit
- driver,
- handles,
- stats,
- number_of_dex_registers,
- /* total_number_of_instructions */ 0,
- /* parent */ nullptr);
- HOptimization* optimizations[] = { inliner };
-
- RunOptimizations(optimizations, arraysize(optimizations), pass_observer);
+ OptimizationDef optimizations[] = {
+ OptDef(OptimizationPass::kInliner)
+ };
+ RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ optimizations);
}
-void OptimizingCompiler::RunArchOptimizations(InstructionSet instruction_set,
- HGraph* graph,
+void OptimizingCompiler::RunArchOptimizations(HGraph* graph,
CodeGenerator* codegen,
- PassObserver* pass_observer) const {
- UNUSED(codegen); // To avoid compilation error when compiling for svelte
- OptimizingCompilerStats* stats = compilation_stats_.get();
- ArenaAllocator* allocator = graph->GetAllocator();
- switch (instruction_set) {
+ const DexCompilationUnit& dex_compilation_unit,
+ PassObserver* pass_observer,
+ VariableSizedHandleScope* handles) const {
+ switch (GetCompilerDriver()->GetInstructionSet()) {
#if defined(ART_ENABLE_CODEGEN_arm)
case InstructionSet::kThumb2:
case InstructionSet::kArm: {
- arm::InstructionSimplifierArm* simplifier =
- new (allocator) arm::InstructionSimplifierArm(graph, stats);
- SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
- GVNOptimization* gvn =
- new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
- HInstructionScheduling* scheduling =
- new (allocator) HInstructionScheduling(graph, instruction_set, codegen);
- HOptimization* arm_optimizations[] = {
- simplifier,
- side_effects,
- gvn,
- scheduling,
+ OptimizationDef arm_optimizations[] = {
+ OptDef(OptimizationPass::kInstructionSimplifierArm),
+ OptDef(OptimizationPass::kSideEffectsAnalysis),
+ OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
+ OptDef(OptimizationPass::kScheduling)
};
- RunOptimizations(arm_optimizations, arraysize(arm_optimizations), pass_observer);
+ RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ arm_optimizations);
break;
}
#endif
#ifdef ART_ENABLE_CODEGEN_arm64
case InstructionSet::kArm64: {
- arm64::InstructionSimplifierArm64* simplifier =
- new (allocator) arm64::InstructionSimplifierArm64(graph, stats);
- SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
- GVNOptimization* gvn =
- new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
- HInstructionScheduling* scheduling =
- new (allocator) HInstructionScheduling(graph, instruction_set);
- HOptimization* arm64_optimizations[] = {
- simplifier,
- side_effects,
- gvn,
- scheduling,
+ OptimizationDef arm64_optimizations[] = {
+ OptDef(OptimizationPass::kInstructionSimplifierArm64),
+ OptDef(OptimizationPass::kSideEffectsAnalysis),
+ OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
+ OptDef(OptimizationPass::kScheduling)
};
- RunOptimizations(arm64_optimizations, arraysize(arm64_optimizations), pass_observer);
+ RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ arm64_optimizations);
break;
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips
case InstructionSet::kMips: {
- mips::InstructionSimplifierMips* simplifier =
- new (allocator) mips::InstructionSimplifierMips(graph, codegen, stats);
- SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
- GVNOptimization* gvn =
- new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
- mips::PcRelativeFixups* pc_relative_fixups =
- new (allocator) mips::PcRelativeFixups(graph, codegen, stats);
- HOptimization* mips_optimizations[] = {
- simplifier,
- side_effects,
- gvn,
- pc_relative_fixups,
+ OptimizationDef mips_optimizations[] = {
+ OptDef(OptimizationPass::kInstructionSimplifierMips),
+ OptDef(OptimizationPass::kSideEffectsAnalysis),
+ OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
+ OptDef(OptimizationPass::kPcRelativeFixupsMips)
};
- RunOptimizations(mips_optimizations, arraysize(mips_optimizations), pass_observer);
+ RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ mips_optimizations);
break;
}
#endif
#ifdef ART_ENABLE_CODEGEN_mips64
case InstructionSet::kMips64: {
- SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
- GVNOptimization* gvn =
- new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
- HOptimization* mips64_optimizations[] = {
- side_effects,
- gvn,
+ OptimizationDef mips64_optimizations[] = {
+ OptDef(OptimizationPass::kSideEffectsAnalysis),
+ OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch")
};
- RunOptimizations(mips64_optimizations, arraysize(mips64_optimizations), pass_observer);
+ RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ mips64_optimizations);
break;
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86
case InstructionSet::kX86: {
- SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
- GVNOptimization* gvn =
- new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
- x86::PcRelativeFixups* pc_relative_fixups =
- new (allocator) x86::PcRelativeFixups(graph, codegen, stats);
- x86::X86MemoryOperandGeneration* memory_gen =
- new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
- HOptimization* x86_optimizations[] = {
- side_effects,
- gvn,
- pc_relative_fixups,
- memory_gen
+ OptimizationDef x86_optimizations[] = {
+ OptDef(OptimizationPass::kSideEffectsAnalysis),
+ OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
+ OptDef(OptimizationPass::kPcRelativeFixupsX86),
+ OptDef(OptimizationPass::kX86MemoryOperandGeneration)
};
- RunOptimizations(x86_optimizations, arraysize(x86_optimizations), pass_observer);
+ RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ x86_optimizations);
break;
}
#endif
#ifdef ART_ENABLE_CODEGEN_x86_64
case InstructionSet::kX86_64: {
- SideEffectsAnalysis* side_effects = new (allocator) SideEffectsAnalysis(graph);
- GVNOptimization* gvn =
- new (allocator) GVNOptimization(graph, *side_effects, "GVN$after_arch");
- x86::X86MemoryOperandGeneration* memory_gen =
- new (allocator) x86::X86MemoryOperandGeneration(graph, codegen, stats);
- HOptimization* x86_64_optimizations[] = {
- side_effects,
- gvn,
- memory_gen
+ OptimizationDef x86_64_optimizations[] = {
+ OptDef(OptimizationPass::kSideEffectsAnalysis),
+ OptDef(OptimizationPass::kGlobalValueNumbering, "GVN$after_arch"),
+ OptDef(OptimizationPass::kX86MemoryOperandGeneration)
};
- RunOptimizations(x86_64_optimizations, arraysize(x86_64_optimizations), pass_observer);
+ RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ x86_64_optimizations);
break;
}
#endif
@@ -768,110 +595,93 @@ static void AllocateRegisters(HGraph* graph,
}
}
+// Strip pass name suffix to get optimization name.
+static std::string ConvertPassNameToOptimizationName(const std::string& pass_name) {
+ size_t pos = pass_name.find(kPassNameSeparator);
+ return pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
+}
+
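Given pass names like "GVN$after_arch" elsewhere in this change, kPassNameSeparator is evidently '$': repeated passes keep a suffix for logging while the prefix selects the optimization. A small sketch of the stripping behavior (separator value assumed):

    #include <iostream>
    #include <string>

    static constexpr char kSeparator = '$';  // assumed to match kPassNameSeparator

    static std::string StripSuffix(const std::string& pass_name) {
      size_t pos = pass_name.find(kSeparator);
      return pos == std::string::npos ? pass_name : pass_name.substr(0, pos);
    }

    int main() {
      // "constant_folding$after_inlining" -> "constant_folding"
      std::cout << StripSuffix("constant_folding$after_inlining") << std::endl;
      // No separator: name is returned unchanged.
      std::cout << StripSuffix("inliner") << std::endl;
      return 0;
    }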
void OptimizingCompiler::RunOptimizations(HGraph* graph,
CodeGenerator* codegen,
- CompilerDriver* driver,
const DexCompilationUnit& dex_compilation_unit,
PassObserver* pass_observer,
VariableSizedHandleScope* handles) const {
- OptimizingCompilerStats* stats = compilation_stats_.get();
- ArenaAllocator* allocator = graph->GetAllocator();
- if (driver->GetCompilerOptions().GetPassesToRun() != nullptr) {
- ArenaVector<HOptimization*> optimizations = BuildOptimizations(
- *driver->GetCompilerOptions().GetPassesToRun(),
- allocator,
- graph,
- stats,
- codegen,
- driver,
- dex_compilation_unit,
- handles);
- RunOptimizations(optimizations.data(), optimizations.size(), pass_observer);
+ const std::vector<std::string>* pass_names =
+ GetCompilerDriver()->GetCompilerOptions().GetPassesToRun();
+ if (pass_names != nullptr) {
+    // If passes were defined on the command line, build the optimization
+    // passes and run those instead of the built-in optimizations.
+ const size_t length = pass_names->size();
+ std::vector<OptimizationDef> optimizations;
+ for (const std::string& pass_name : *pass_names) {
+ std::string opt_name = ConvertPassNameToOptimizationName(pass_name);
+      optimizations.push_back(OptDef(OptimizationPassByName(opt_name), pass_name.c_str()));
+ }
+ RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ optimizations.data(),
+ length);
return;
}
- HDeadCodeElimination* dce1 = new (allocator) HDeadCodeElimination(
- graph, stats, "dead_code_elimination$initial");
- HDeadCodeElimination* dce2 = new (allocator) HDeadCodeElimination(
- graph, stats, "dead_code_elimination$after_inlining");
- HDeadCodeElimination* dce3 = new (allocator) HDeadCodeElimination(
- graph, stats, "dead_code_elimination$final");
- HConstantFolding* fold1 = new (allocator) HConstantFolding(graph, "constant_folding");
- InstructionSimplifier* simplify1 = new (allocator) InstructionSimplifier(
- graph, codegen, driver, stats);
- HSelectGenerator* select_generator = new (allocator) HSelectGenerator(graph, handles, stats);
- HConstantFolding* fold2 = new (allocator) HConstantFolding(
- graph, "constant_folding$after_inlining");
- HConstantFolding* fold3 = new (allocator) HConstantFolding(graph, "constant_folding$after_bce");
- SideEffectsAnalysis* side_effects1 = new (allocator) SideEffectsAnalysis(
- graph, "side_effects$before_gvn");
- SideEffectsAnalysis* side_effects2 = new (allocator) SideEffectsAnalysis(
- graph, "side_effects$before_lse");
- GVNOptimization* gvn = new (allocator) GVNOptimization(graph, *side_effects1);
- LICM* licm = new (allocator) LICM(graph, *side_effects1, stats);
- HInductionVarAnalysis* induction = new (allocator) HInductionVarAnalysis(graph);
- BoundsCheckElimination* bce =
- new (allocator) BoundsCheckElimination(graph, *side_effects1, induction);
- HLoopOptimization* loop = new (allocator) HLoopOptimization(graph, driver, induction, stats);
- LoadStoreAnalysis* lsa = new (allocator) LoadStoreAnalysis(graph);
- LoadStoreElimination* lse =
- new (allocator) LoadStoreElimination(graph, *side_effects2, *lsa, stats);
- HSharpening* sharpening = new (allocator) HSharpening(
- graph, codegen, dex_compilation_unit, driver, handles);
- InstructionSimplifier* simplify2 = new (allocator) InstructionSimplifier(
- graph, codegen, driver, stats, "instruction_simplifier$after_inlining");
- InstructionSimplifier* simplify3 = new (allocator) InstructionSimplifier(
- graph, codegen, driver, stats, "instruction_simplifier$after_bce");
- InstructionSimplifier* simplify4 = new (allocator) InstructionSimplifier(
- graph, codegen, driver, stats, "instruction_simplifier$before_codegen");
- IntrinsicsRecognizer* intrinsics = new (allocator) IntrinsicsRecognizer(graph, stats);
- CHAGuardOptimization* cha_guard = new (allocator) CHAGuardOptimization(graph);
- CodeSinking* code_sinking = new (allocator) CodeSinking(graph, stats);
- ConstructorFenceRedundancyElimination* cfre =
- new (allocator) ConstructorFenceRedundancyElimination(graph, stats);
-
- HOptimization* optimizations1[] = {
- intrinsics,
- sharpening,
- fold1,
- simplify1,
- dce1,
+ OptimizationDef optimizations1[] = {
+ OptDef(OptimizationPass::kIntrinsicsRecognizer),
+ OptDef(OptimizationPass::kSharpening),
+ OptDef(OptimizationPass::kConstantFolding),
+ OptDef(OptimizationPass::kInstructionSimplifier),
+ OptDef(OptimizationPass::kDeadCodeElimination, "dead_code_elimination$initial")
};
- RunOptimizations(optimizations1, arraysize(optimizations1), pass_observer);
+ RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ optimizations1);
- MaybeRunInliner(graph, codegen, driver, dex_compilation_unit, pass_observer, handles);
+ MaybeRunInliner(graph, codegen, dex_compilation_unit, pass_observer, handles);
- HOptimization* optimizations2[] = {
+ OptimizationDef optimizations2[] = {
// SelectGenerator depends on the InstructionSimplifier removing
// redundant suspend checks to recognize empty blocks.
- select_generator,
- fold2, // TODO: if we don't inline we can also skip fold2.
- simplify2,
- dce2,
- side_effects1,
- gvn,
- licm,
- induction,
- bce,
- loop,
- fold3, // evaluates code generated by dynamic bce
- simplify3,
- side_effects2,
- lsa,
- lse,
- cha_guard,
- dce3,
- code_sinking,
+ OptDef(OptimizationPass::kSelectGenerator),
+ // TODO: if we don't inline we can also skip fold2.
+ OptDef(OptimizationPass::kConstantFolding, "constant_folding$after_inlining"),
+ OptDef(OptimizationPass::kInstructionSimplifier, "instruction_simplifier$after_inlining"),
+ OptDef(OptimizationPass::kDeadCodeElimination, "dead_code_elimination$after_inlining"),
+ OptDef(OptimizationPass::kSideEffectsAnalysis, "side_effects$before_gvn"),
+ OptDef(OptimizationPass::kGlobalValueNumbering),
+ OptDef(OptimizationPass::kInvariantCodeMotion),
+ OptDef(OptimizationPass::kInductionVarAnalysis),
+ OptDef(OptimizationPass::kBoundsCheckElimination),
+ OptDef(OptimizationPass::kLoopOptimization),
+ // Evaluates code generated by dynamic bce.
+ OptDef(OptimizationPass::kConstantFolding, "constant_folding$after_bce"),
+ OptDef(OptimizationPass::kInstructionSimplifier, "instruction_simplifier$after_bce"),
+ OptDef(OptimizationPass::kSideEffectsAnalysis, "side_effects$before_lse"),
+ OptDef(OptimizationPass::kLoadStoreAnalysis),
+ OptDef(OptimizationPass::kLoadStoreElimination),
+ OptDef(OptimizationPass::kCHAGuardOptimization),
+ OptDef(OptimizationPass::kDeadCodeElimination, "dead_code_elimination$final"),
+ OptDef(OptimizationPass::kCodeSinking),
// The codegen has a few assumptions that only the instruction simplifier
// can satisfy. For example, the code generator does not expect to see a
// HTypeConversion from a type to the same type.
- simplify4,
- cfre, // Eliminate constructor fences after code sinking to avoid
- // complicated sinking logic to split a fence with many inputs.
+ OptDef(OptimizationPass::kInstructionSimplifier, "instruction_simplifier$before_codegen"),
+ // Eliminate constructor fences after code sinking to avoid
+ // complicated sinking logic to split a fence with many inputs.
+ OptDef(OptimizationPass::kConstructorFenceRedundancyElimination)
};
- RunOptimizations(optimizations2, arraysize(optimizations2), pass_observer);
+ RunOptimizations(graph,
+ codegen,
+ dex_compilation_unit,
+ pass_observer,
+ handles,
+ optimizations2);
- RunArchOptimizations(driver->GetInstructionSet(), graph, codegen, pass_observer);
+ RunArchOptimizations(graph, codegen, dex_compilation_unit, pass_observer, handles);
}
static ArenaVector<linker::LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
@@ -890,7 +700,6 @@ static ArenaVector<linker::LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator*
CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
CodeVectorAllocator* code_allocator,
CodeGenerator* codegen,
- CompilerDriver* compiler_driver,
const DexFile::CodeItem* code_item_for_osr_check) const {
ArenaVector<linker::LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
ArenaVector<uint8_t> stack_map(allocator->Adapter(kArenaAllocStackMaps));
@@ -905,7 +714,7 @@ CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* allocator,
code_item_for_osr_check);
CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
- compiler_driver,
+ GetCompilerDriver(),
codegen->GetInstructionSet(),
ArrayRef<const uint8_t>(code_allocator->GetMemory()),
// Follow Quick's behavior and set the frame size to zero if it is
@@ -929,7 +738,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
ArtMethod* method,
bool osr,
VariableSizedHandleScope* handles) const {
- MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptCompilation);
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptBytecodeCompilation);
CompilerDriver* compiler_driver = GetCompilerDriver();
InstructionSet instruction_set = compiler_driver->GetInstructionSet();
const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
@@ -948,8 +757,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
}
if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
- MaybeRecordStat(compilation_stats_.get(),
- MethodCompilationStat::kNotCompiledPathological);
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledPathological);
return nullptr;
}
@@ -959,8 +767,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
const CompilerOptions& compiler_options = compiler_driver->GetCompilerOptions();
if ((compiler_options.GetCompilerFilter() == CompilerFilter::kSpace)
&& (code_item->insns_size_in_code_units_ > kSpaceFilterOptimizingThreshold)) {
- MaybeRecordStat(compilation_stats_.get(),
- MethodCompilationStat::kNotCompiledSpaceFilter);
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledSpaceFilter);
return nullptr;
}
@@ -991,8 +798,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
compiler_driver->GetCompilerOptions(),
compilation_stats_.get()));
if (codegen.get() == nullptr) {
- MaybeRecordStat(compilation_stats_.get(),
- MethodCompilationStat::kNotCompiledNoCodegen);
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kNotCompiledNoCodegen);
return nullptr;
}
codegen->GetAssembler()->cfi().SetEnabled(
@@ -1049,7 +855,6 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
RunOptimizations(graph,
codegen.get(),
- compiler_driver,
dex_compilation_unit,
&pass_observer,
handles);
@@ -1065,6 +870,7 @@ CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* allocator,
codegen->Compile(code_allocator);
pass_observer.DumpDisassembly();
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledBytecode);
return codegen.release();
}
@@ -1075,6 +881,7 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
const DexCompilationUnit& dex_compilation_unit,
ArtMethod* method,
VariableSizedHandleScope* handles) const {
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kAttemptIntrinsicCompilation);
CompilerDriver* compiler_driver = GetCompilerDriver();
InstructionSet instruction_set = compiler_driver->GetInstructionSet();
const DexFile& dex_file = *dex_compilation_unit.GetDexFile();
@@ -1086,8 +893,6 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
// Do not attempt to compile on architectures we do not support.
if (!IsInstructionSetSupported(instruction_set)) {
- MaybeRecordStat(compilation_stats_.get(),
- MethodCompilationStat::kNotCompiledUnsupportedIsa);
return nullptr;
}
@@ -1112,8 +917,6 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
compiler_driver->GetCompilerOptions(),
compilation_stats_.get()));
if (codegen.get() == nullptr) {
- MaybeRecordStat(compilation_stats_.get(),
- MethodCompilationStat::kNotCompiledNoCodegen);
return nullptr;
}
codegen->GetAssembler()->cfi().SetEnabled(
@@ -1140,20 +943,20 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
builder.BuildIntrinsicGraph(method);
}
- OptimizingCompilerStats* stats = compilation_stats_.get();
- InstructionSimplifier* simplify = new (allocator) InstructionSimplifier(
- graph, codegen.get(), compiler_driver, stats);
- IntrinsicsRecognizer* intrinsics = new (allocator) IntrinsicsRecognizer(graph, stats);
-
- HOptimization* optimizations[] = {
- intrinsics,
- // Some intrinsics are converted to HIR by the simplifier and the codegen also
- // has a few assumptions that only the instruction simplifier can satisfy.
- simplify,
+ OptimizationDef optimizations[] = {
+ OptDef(OptimizationPass::kIntrinsicsRecognizer),
+ // Some intrinsics are converted to HIR by the simplifier and the codegen also
+ // has a few assumptions that only the instruction simplifier can satisfy.
+ OptDef(OptimizationPass::kInstructionSimplifier),
};
- RunOptimizations(optimizations, arraysize(optimizations), &pass_observer);
+ RunOptimizations(graph,
+ codegen.get(),
+ dex_compilation_unit,
+ &pass_observer,
+ handles,
+ optimizations);
- RunArchOptimizations(compiler_driver->GetInstructionSet(), graph, codegen.get(), &pass_observer);
+ RunArchOptimizations(graph, codegen.get(), dex_compilation_unit, &pass_observer, handles);
AllocateRegisters(graph,
codegen.get(),
@@ -1171,6 +974,7 @@ CodeGenerator* OptimizingCompiler::TryCompileIntrinsic(
VLOG(compiler) << "Compiled intrinsic: " << method->GetIntrinsic()
<< " " << graph->PrettyMethod();
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledIntrinsic);
return codegen.release();
}
@@ -1238,12 +1042,9 @@ CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
}
}
if (codegen.get() != nullptr) {
- MaybeRecordStat(compilation_stats_.get(),
- MethodCompilationStat::kCompiled);
compiled_method = Emit(&allocator,
&code_allocator,
codegen.get(),
- compiler_driver,
compiled_intrinsic ? nullptr : code_item);
if (compiled_intrinsic) {
compiled_method->MarkAsIntrinsic();
@@ -1325,7 +1126,6 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
CompiledMethod* compiled_method = Emit(&allocator,
&code_allocator,
codegen.get(),
- GetCompilerDriver(),
/* code_item_for_osr_check */ nullptr);
compiled_method->MarkAsIntrinsic();
return compiled_method;
@@ -1333,10 +1133,20 @@ CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
}
}
- return ArtQuickJniCompileMethod(GetCompilerDriver(),
- access_flags,
- method_idx,
- dex_file);
+ JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
+ GetCompilerDriver(), access_flags, method_idx, dex_file);
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiledNativeStub);
+ return CompiledMethod::SwapAllocCompiledMethod(
+ GetCompilerDriver(),
+ jni_compiled_method.GetInstructionSet(),
+ jni_compiled_method.GetCode(),
+ jni_compiled_method.GetFrameSize(),
+ jni_compiled_method.GetCoreSpillMask(),
+ jni_compiled_method.GetFpSpillMask(),
+ /* method_info */ ArrayRef<const uint8_t>(),
+ /* vmap_table */ ArrayRef<const uint8_t>(),
+ jni_compiled_method.GetCfi(),
+ /* patches */ ArrayRef<const linker::LinkerPatch>());
}
Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
@@ -1386,7 +1196,69 @@ bool OptimizingCompiler::JitCompile(Thread* self,
Runtime* runtime = Runtime::Current();
ArenaAllocator allocator(runtime->GetJitArenaPool());
- ArenaStack arena_stack(Runtime::Current()->GetJitArenaPool());
+
+ if (UNLIKELY(method->IsNative())) {
+ JniCompiledMethod jni_compiled_method = ArtQuickJniCompileMethod(
+ GetCompilerDriver(), access_flags, method_idx, *dex_file);
+ ScopedNullHandle<mirror::ObjectArray<mirror::Object>> roots;
+ ArenaSet<ArtMethod*, std::less<ArtMethod*>> cha_single_implementation_list(
+ allocator.Adapter(kArenaAllocCHA));
+ const void* code = code_cache->CommitCode(
+ self,
+ method,
+ /* stack_map_data */ nullptr,
+ /* method_info_data */ nullptr,
+ /* roots_data */ nullptr,
+ jni_compiled_method.GetFrameSize(),
+ jni_compiled_method.GetCoreSpillMask(),
+ jni_compiled_method.GetFpSpillMask(),
+ jni_compiled_method.GetCode().data(),
+ jni_compiled_method.GetCode().size(),
+ /* data_size */ 0u,
+ osr,
+ roots,
+ /* has_should_deoptimize_flag */ false,
+ cha_single_implementation_list);
+ if (code == nullptr) {
+ return false;
+ }
+
+ const CompilerOptions& compiler_options = GetCompilerDriver()->GetCompilerOptions();
+ if (compiler_options.GetGenerateDebugInfo()) {
+ const auto* method_header = reinterpret_cast<const OatQuickMethodHeader*>(code);
+ const uintptr_t code_address = reinterpret_cast<uintptr_t>(method_header->GetCode());
+ debug::MethodDebugInfo info = {};
+ DCHECK(info.trampoline_name.empty());
+ info.dex_file = dex_file;
+ info.class_def_index = class_def_idx;
+ info.dex_method_index = method_idx;
+ info.access_flags = access_flags;
+ info.code_item = code_item;
+ info.isa = jni_compiled_method.GetInstructionSet();
+ info.deduped = false;
+ info.is_native_debuggable = compiler_options.GetNativeDebuggable();
+ info.is_optimized = true;
+ info.is_code_address_text_relative = false;
+ info.code_address = code_address;
+ info.code_size = jni_compiled_method.GetCode().size();
+ info.frame_size_in_bytes = method_header->GetFrameSizeInBytes();
+ info.code_info = nullptr;
+ info.cfi = jni_compiled_method.GetCfi();
+ std::vector<uint8_t> elf_file = debug::WriteDebugElfFileForMethods(
+ GetCompilerDriver()->GetInstructionSet(),
+ GetCompilerDriver()->GetInstructionSetFeatures(),
+ ArrayRef<const debug::MethodDebugInfo>(&info, 1));
+ CreateJITCodeEntryForAddress(code_address, std::move(elf_file));
+ }
+
+ Runtime::Current()->GetJit()->AddMemoryUsage(method, allocator.BytesUsed());
+ if (jit_logger != nullptr) {
+ jit_logger->WriteLog(code, jni_compiled_method.GetCode().size(), method);
+ }
+ return true;
+ }
+
+ ArenaStack arena_stack(runtime->GetJitArenaPool());
CodeVectorAllocator code_allocator(&allocator);
VariableSizedHandleScope handles(self);
@@ -1431,6 +1303,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
self, class_linker->GetClassRoot(ClassLinker::kObjectArrayClass), number_of_roots)));
if (roots == nullptr) {
// Out of memory, just clear the exception to avoid any Java exception uncaught problems.
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
DCHECK(self->IsExceptionPending());
self->ClearException();
return false;
@@ -1447,9 +1320,9 @@ bool OptimizingCompiler::JitCompile(Thread* self,
&method_info_data,
&roots_data);
if (stack_map_data == nullptr || roots_data == nullptr) {
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
return false;
}
- MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kCompiled);
codegen->BuildStackMaps(MemoryRegion(stack_map_data, stack_map_size),
MemoryRegion(method_info_data, method_info_size),
code_item);
@@ -1473,6 +1346,7 @@ bool OptimizingCompiler::JitCompile(Thread* self,
codegen->GetGraph()->GetCHASingleImplementationList());
if (code == nullptr) {
+ MaybeRecordStat(compilation_stats_.get(), MethodCompilationStat::kJitOutOfMemoryForCommit);
code_cache->ClearData(self, stack_map_data, roots_data);
return false;
}
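
The hunks above give native methods an early-return path inside JitCompile(): the JNI stub is compiled and committed to the code cache directly, with no stack maps, no GC roots and no should-deoptimize flag, and the bytecode pipeline is never entered. Below is a minimal stand-alone sketch of that control-flow shape only; FakeMethod, CommitJniStub and CompileBytecode are invented stand-ins, not ART APIs.

    #include <cstdio>

    struct FakeMethod { bool is_native; };

    // Stand-ins for committing a JNI stub vs. running the optimizing pipeline.
    static bool CommitJniStub(const FakeMethod&) { return true; }
    static bool CompileBytecode(const FakeMethod&) { return true; }

    static bool JitCompileShape(const FakeMethod& method) {
      if (method.is_native) {
        // JNI stubs need no stack maps and no roots array, which is why the
        // real hunk passes nullptr/zero for those CommitCode() arguments.
        return CommitJniStub(method);
      }
      return CompileBytecode(method);  // pre-existing optimizing path
    }

    int main() {
      std::printf("native: %d\n", JitCompileShape({/*is_native=*/true}));
      std::printf("bytecode: %d\n", JitCompileShape({/*is_native=*/false}));
      return 0;
    }
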
diff --git a/compiler/optimizing/optimizing_compiler_stats.h b/compiler/optimizing/optimizing_compiler_stats.h
index 07f9635aba..a2e92d2931 100644
--- a/compiler/optimizing/optimizing_compiler_stats.h
+++ b/compiler/optimizing/optimizing_compiler_stats.h
@@ -27,10 +27,13 @@
namespace art {
-enum MethodCompilationStat {
- kAttemptCompilation = 0,
+enum class MethodCompilationStat {
+ kAttemptBytecodeCompilation = 0,
+ kAttemptIntrinsicCompilation,
+ kCompiledNativeStub,
+ kCompiledIntrinsic,
+ kCompiledBytecode,
kCHAInline,
- kCompiled,
kInlinedInvoke,
kReplacedInvokeWithSimplePattern,
kInstructionSimplifications,
@@ -94,8 +97,10 @@ enum MethodCompilationStat {
kConstructorFenceRemovedLSE,
kConstructorFenceRemovedPFRA,
kConstructorFenceRemovedCFRE,
+ kJitOutOfMemoryForCommit,
kLastStat
};
+std::ostream& operator<<(std::ostream& os, const MethodCompilationStat& rhs);
class OptimizingCompilerStats {
public:
@@ -105,7 +110,15 @@ class OptimizingCompilerStats {
}
void RecordStat(MethodCompilationStat stat, uint32_t count = 1) {
- compile_stats_[stat] += count;
+ size_t stat_index = static_cast<size_t>(stat);
+ DCHECK_LT(stat_index, arraysize(compile_stats_));
+ compile_stats_[stat_index] += count;
+ }
+
+ uint32_t GetStat(MethodCompilationStat stat) const {
+ size_t stat_index = static_cast<size_t>(stat);
+ DCHECK_LT(stat_index, arraysize(compile_stats_));
+ return compile_stats_[stat_index];
}
void Log() const {
@@ -114,18 +127,29 @@ class OptimizingCompilerStats {
return;
}
- if (compile_stats_[kAttemptCompilation] == 0) {
+ uint32_t compiled_intrinsics = GetStat(MethodCompilationStat::kCompiledIntrinsic);
+ uint32_t compiled_native_stubs = GetStat(MethodCompilationStat::kCompiledNativeStub);
+ uint32_t bytecode_attempts =
+ GetStat(MethodCompilationStat::kAttemptBytecodeCompilation);
+ if (compiled_intrinsics == 0u && compiled_native_stubs == 0u && bytecode_attempts == 0u) {
LOG(INFO) << "Did not compile any method.";
} else {
- float compiled_percent =
- compile_stats_[kCompiled] * 100.0f / compile_stats_[kAttemptCompilation];
- LOG(INFO) << "Attempted compilation of " << compile_stats_[kAttemptCompilation]
- << " methods: " << std::fixed << std::setprecision(2)
- << compiled_percent << "% (" << compile_stats_[kCompiled] << ") compiled.";
-
- for (size_t i = 0; i < kLastStat; i++) {
+ uint32_t compiled_bytecode_methods =
+ GetStat(MethodCompilationStat::kCompiledBytecode);
+ // A successful intrinsic compilation preempts other compilation attempts, but a failed
+ // intrinsic compilation still counts towards the bytecode or native stub attempts.
+ uint32_t num_compilation_attempts =
+ compiled_intrinsics + compiled_native_stubs + bytecode_attempts;
+ uint32_t num_successful_compilations =
+ compiled_intrinsics + compiled_native_stubs + compiled_bytecode_methods;
+ float compiled_percent = num_successful_compilations * 100.0f / num_compilation_attempts;
+ LOG(INFO) << "Attempted compilation of "
+ << num_compilation_attempts << " methods: " << std::fixed << std::setprecision(2)
+ << compiled_percent << "% (" << num_successful_compilations << ") compiled.";
+
+ for (size_t i = 0; i < arraysize(compile_stats_); ++i) {
if (compile_stats_[i] != 0) {
- LOG(INFO) << PrintMethodCompilationStat(static_cast<MethodCompilationStat>(i)) << ": "
+ LOG(INFO) << "OptStat#" << static_cast<MethodCompilationStat>(i) << ": "
<< compile_stats_[i];
}
}
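
For a concrete reading of the new formula: with 2 compiled intrinsics, 3 compiled native stubs and 10 bytecode attempts of which 8 compiled (numbers invented for illustration), num_compilation_attempts is 2 + 3 + 10 = 15, num_successful_compilations is 2 + 3 + 8 = 13, and the log reports 13 * 100.0f / 15 ≈ 86.67% compiled.
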
@@ -133,7 +157,7 @@ class OptimizingCompilerStats {
}
void AddTo(OptimizingCompilerStats* other_stats) {
- for (size_t i = 0; i != kLastStat; ++i) {
+ for (size_t i = 0; i != arraysize(compile_stats_); ++i) {
uint32_t count = compile_stats_[i];
if (count != 0) {
other_stats->RecordStat(static_cast<MethodCompilationStat>(i), count);
@@ -142,91 +166,13 @@ class OptimizingCompilerStats {
}
void Reset() {
- for (size_t i = 0; i != kLastStat; ++i) {
- compile_stats_[i] = 0u;
+ for (std::atomic<uint32_t>& stat : compile_stats_) {
+ stat = 0u;
}
}
private:
- std::string PrintMethodCompilationStat(MethodCompilationStat stat) const {
- std::string name;
- switch (stat) {
- case kAttemptCompilation : name = "AttemptCompilation"; break;
- case kCHAInline : name = "CHAInline"; break;
- case kCompiled : name = "Compiled"; break;
- case kInlinedInvoke : name = "InlinedInvoke"; break;
- case kReplacedInvokeWithSimplePattern: name = "ReplacedInvokeWithSimplePattern"; break;
- case kInstructionSimplifications: name = "InstructionSimplifications"; break;
- case kInstructionSimplificationsArch: name = "InstructionSimplificationsArch"; break;
- case kUnresolvedMethod : name = "UnresolvedMethod"; break;
- case kUnresolvedField : name = "UnresolvedField"; break;
- case kUnresolvedFieldNotAFastAccess : name = "UnresolvedFieldNotAFastAccess"; break;
- case kRemovedCheckedCast: name = "RemovedCheckedCast"; break;
- case kRemovedDeadInstruction: name = "RemovedDeadInstruction"; break;
- case kRemovedNullCheck: name = "RemovedNullCheck"; break;
- case kNotCompiledSkipped: name = "NotCompiledSkipped"; break;
- case kNotCompiledInvalidBytecode: name = "NotCompiledInvalidBytecode"; break;
- case kNotCompiledThrowCatchLoop : name = "NotCompiledThrowCatchLoop"; break;
- case kNotCompiledAmbiguousArrayOp : name = "NotCompiledAmbiguousArrayOp"; break;
- case kNotCompiledHugeMethod : name = "NotCompiledHugeMethod"; break;
- case kNotCompiledLargeMethodNoBranches : name = "NotCompiledLargeMethodNoBranches"; break;
- case kNotCompiledMalformedOpcode : name = "NotCompiledMalformedOpcode"; break;
- case kNotCompiledNoCodegen : name = "NotCompiledNoCodegen"; break;
- case kNotCompiledPathological : name = "NotCompiledPathological"; break;
- case kNotCompiledSpaceFilter : name = "NotCompiledSpaceFilter"; break;
- case kNotCompiledUnhandledInstruction : name = "NotCompiledUnhandledInstruction"; break;
- case kNotCompiledUnsupportedIsa : name = "NotCompiledUnsupportedIsa"; break;
- case kNotCompiledVerificationError : name = "NotCompiledVerificationError"; break;
- case kNotCompiledVerifyAtRuntime : name = "NotCompiledVerifyAtRuntime"; break;
- case kInlinedMonomorphicCall: name = "InlinedMonomorphicCall"; break;
- case kInlinedPolymorphicCall: name = "InlinedPolymorphicCall"; break;
- case kMonomorphicCall: name = "MonomorphicCall"; break;
- case kPolymorphicCall: name = "PolymorphicCall"; break;
- case kMegamorphicCall: name = "MegamorphicCall"; break;
- case kBooleanSimplified : name = "BooleanSimplified"; break;
- case kIntrinsicRecognized : name = "IntrinsicRecognized"; break;
- case kLoopInvariantMoved : name = "LoopInvariantMoved"; break;
- case kLoopVectorized : name = "LoopVectorized"; break;
- case kLoopVectorizedIdiom : name = "LoopVectorizedIdiom"; break;
- case kSelectGenerated : name = "SelectGenerated"; break;
- case kRemovedInstanceOf: name = "RemovedInstanceOf"; break;
- case kInlinedInvokeVirtualOrInterface: name = "InlinedInvokeVirtualOrInterface"; break;
- case kImplicitNullCheckGenerated: name = "ImplicitNullCheckGenerated"; break;
- case kExplicitNullCheckGenerated: name = "ExplicitNullCheckGenerated"; break;
- case kSimplifyIf: name = "SimplifyIf"; break;
- case kInstructionSunk: name = "InstructionSunk"; break;
- case kNotInlinedUnresolvedEntrypoint: name = "NotInlinedUnresolvedEntrypoint"; break;
- case kNotInlinedDexCache: name = "NotInlinedDexCache"; break;
- case kNotInlinedStackMaps: name = "NotInlinedStackMaps"; break;
- case kNotInlinedEnvironmentBudget: name = "NotInlinedEnvironmentBudget"; break;
- case kNotInlinedInstructionBudget: name = "NotInlinedInstructionBudget"; break;
- case kNotInlinedLoopWithoutExit: name = "NotInlinedLoopWithoutExit"; break;
- case kNotInlinedIrreducibleLoop: name = "NotInlinedIrreducibleLoop"; break;
- case kNotInlinedAlwaysThrows: name = "NotInlinedAlwaysThrows"; break;
- case kNotInlinedInfiniteLoop: name = "NotInlinedInfiniteLoop"; break;
- case kNotInlinedTryCatch: name = "NotInlinedTryCatch"; break;
- case kNotInlinedRegisterAllocator: name = "NotInlinedRegisterAllocator"; break;
- case kNotInlinedCannotBuild: name = "NotInlinedCannotBuild"; break;
- case kNotInlinedNotVerified: name = "NotInlinedNotVerified"; break;
- case kNotInlinedCodeItem: name = "NotInlinedCodeItem"; break;
- case kNotInlinedWont: name = "NotInlinedWont"; break;
- case kNotInlinedRecursiveBudget: name = "NotInlinedRecursiveBudget"; break;
- case kNotInlinedProxy: name = "NotInlinedProxy"; break;
- case kConstructorFenceGeneratedNew: name = "ConstructorFenceGeneratedNew"; break;
- case kConstructorFenceGeneratedFinal: name = "ConstructorFenceGeneratedFinal"; break;
- case kConstructorFenceRemovedLSE: name = "ConstructorFenceRemovedLSE"; break;
- case kConstructorFenceRemovedPFRA: name = "ConstructorFenceRemovedPFRA"; break;
- case kConstructorFenceRemovedCFRE: name = "ConstructorFenceRemovedCFRE"; break;
-
- case kLastStat:
- LOG(FATAL) << "invalid stat "
- << static_cast<std::underlying_type<MethodCompilationStat>::type>(stat);
- UNREACHABLE();
- }
- return "OptStat#" + name;
- }
-
- std::atomic<uint32_t> compile_stats_[kLastStat];
+ std::atomic<uint32_t> compile_stats_[static_cast<size_t>(MethodCompilationStat::kLastStat)];
DISALLOW_COPY_AND_ASSIGN(OptimizingCompilerStats);
};
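
Taken together, the stats changes replace a plain enum plus a hand-written name switch with a scoped enum, an operator<< for printing, and explicit casts when indexing the atomic counter array. A compilable miniature of that pattern follows; Stat, Stats and the counter names are invented for the sketch.

    #include <atomic>
    #include <cstddef>
    #include <cstdint>
    #include <iostream>

    enum class Stat { kFoo = 0, kBar, kLastStat };

    // Printing via operator<< replaces the old hand-written name switch.
    std::ostream& operator<<(std::ostream& os, Stat s) {
      switch (s) {
        case Stat::kFoo: return os << "Foo";
        case Stat::kBar: return os << "Bar";
        case Stat::kLastStat: break;
      }
      return os << "invalid";
    }

    class Stats {
     public:
      Stats() { for (std::atomic<uint32_t>& c : counters_) c = 0u; }
      void Record(Stat s, uint32_t count = 1u) {
        // Scoped enums do not convert implicitly, so indexing needs the cast.
        counters_[static_cast<size_t>(s)] += count;
      }
      uint32_t Get(Stat s) const { return counters_[static_cast<size_t>(s)].load(); }
     private:
      std::atomic<uint32_t> counters_[static_cast<size_t>(Stat::kLastStat)];
    };

    int main() {
      Stats stats;
      stats.Record(Stat::kFoo);
      stats.Record(Stat::kFoo, 2u);
      std::cout << "OptStat#" << Stat::kFoo << ": " << stats.Get(Stat::kFoo) << '\n';  // prints 3
      return 0;
    }

The scoped enum buys type safety (no accidental arithmetic on raw stat values) at the cost of an explicit static_cast at each indexing site, which the DCHECK_LT in RecordStat/GetStat keeps honest.
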
diff --git a/compiler/optimizing/pc_relative_fixups_mips.h b/compiler/optimizing/pc_relative_fixups_mips.h
index 5a7397bf9d..ec2c711f8d 100644
--- a/compiler/optimizing/pc_relative_fixups_mips.h
+++ b/compiler/optimizing/pc_relative_fixups_mips.h
@@ -29,7 +29,7 @@ namespace mips {
class PcRelativeFixups : public HOptimization {
public:
PcRelativeFixups(HGraph* graph, CodeGenerator* codegen, OptimizingCompilerStats* stats)
- : HOptimization(graph, "pc_relative_fixups_mips", stats),
+ : HOptimization(graph, kPcRelativeFixupsMipsPassName, stats),
codegen_(codegen) {}
static constexpr const char* kPcRelativeFixupsMipsPassName = "pc_relative_fixups_mips";
diff --git a/compiler/optimizing/scheduler.h b/compiler/optimizing/scheduler.h
index a6e160379b..bb7c353bc2 100644
--- a/compiler/optimizing/scheduler.h
+++ b/compiler/optimizing/scheduler.h
@@ -495,8 +495,11 @@ inline bool SchedulingGraph::IsSchedulingBarrier(const HInstruction* instruction
class HInstructionScheduling : public HOptimization {
public:
- HInstructionScheduling(HGraph* graph, InstructionSet instruction_set, CodeGenerator* cg = nullptr)
- : HOptimization(graph, kInstructionScheduling),
+ HInstructionScheduling(HGraph* graph,
+ InstructionSet instruction_set,
+ CodeGenerator* cg = nullptr,
+ const char* name = kInstructionSchedulingPassName)
+ : HOptimization(graph, name),
codegen_(cg),
instruction_set_(instruction_set) {}
@@ -505,7 +508,7 @@ class HInstructionScheduling : public HOptimization {
}
void Run(bool only_optimize_loop_blocks, bool schedule_randomly);
- static constexpr const char* kInstructionScheduling = "scheduler";
+ static constexpr const char* kInstructionSchedulingPassName = "scheduler";
private:
CodeGenerator* const codegen_;
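
This scheduler change, like the select_generator and sharpening hunks that follow, applies one recurring pattern: the constructor gains a pass-name parameter defaulting to the class's kXxxPassName constant, so a pipeline can schedule the same optimization more than once under distinct names. A tiny hypothetical illustration of the idiom; Optimization, Scheduling and the second instance's name are made up.

    #include <iostream>
    #include <string>

    class Optimization {
     public:
      explicit Optimization(const char* name) : name_(name) {}
      const std::string& GetName() const { return name_; }
     private:
      std::string name_;
    };

    class Scheduling : public Optimization {
     public:
      static constexpr const char* kPassName = "scheduler";
      // Defaulted name keeps existing call sites working unchanged.
      explicit Scheduling(const char* name = kPassName) : Optimization(name) {}
    };

    int main() {
      Scheduling usual;                               // named "scheduler"
      Scheduling repeat("scheduler$after_inliner");   // invented second instance name
      std::cout << usual.GetName() << '\n' << repeat.GetName() << '\n';
      return 0;
    }
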
diff --git a/compiler/optimizing/select_generator.cc b/compiler/optimizing/select_generator.cc
index 77ec9a6285..66e51421ca 100644
--- a/compiler/optimizing/select_generator.cc
+++ b/compiler/optimizing/select_generator.cc
@@ -24,8 +24,9 @@ static constexpr size_t kMaxInstructionsInBranch = 1u;
HSelectGenerator::HSelectGenerator(HGraph* graph,
VariableSizedHandleScope* handles,
- OptimizingCompilerStats* stats)
- : HOptimization(graph, kSelectGeneratorPassName, stats),
+ OptimizingCompilerStats* stats,
+ const char* name)
+ : HOptimization(graph, name, stats),
handle_scope_(handles) {
}
diff --git a/compiler/optimizing/select_generator.h b/compiler/optimizing/select_generator.h
index f8cf00e35a..bda57fd5c8 100644
--- a/compiler/optimizing/select_generator.h
+++ b/compiler/optimizing/select_generator.h
@@ -65,7 +65,8 @@ class HSelectGenerator : public HOptimization {
public:
HSelectGenerator(HGraph* graph,
VariableSizedHandleScope* handles,
- OptimizingCompilerStats* stats);
+ OptimizingCompilerStats* stats,
+ const char* name = kSelectGeneratorPassName);
void Run() OVERRIDE;
diff --git a/compiler/optimizing/sharpening.h b/compiler/optimizing/sharpening.h
index f74b0afdbf..bb1954eeeb 100644
--- a/compiler/optimizing/sharpening.h
+++ b/compiler/optimizing/sharpening.h
@@ -36,8 +36,9 @@ class HSharpening : public HOptimization {
CodeGenerator* codegen,
const DexCompilationUnit& compilation_unit,
CompilerDriver* compiler_driver,
- VariableSizedHandleScope* handles)
- : HOptimization(graph, kSharpeningPassName),
+ VariableSizedHandleScope* handles,
+ const char* name = kSharpeningPassName)
+ : HOptimization(graph, name),
codegen_(codegen),
compilation_unit_(compilation_unit),
compiler_driver_(compiler_driver),