author     2020-05-13 09:21:00 +0100
committer  2020-05-15 14:09:54 +0000
commit     f91fc1220f1b77c55317ff50f4dde8e6b043858f (patch)
tree       3b8416a4fa9b9278d1114d4002485e0cb1c704bf /compiler
parent     33c091eaaa0febedc93cff820def75b122fde867 (diff)
Optimizing: Run gtests without creating the Runtime.
The only Optimizing test that actually needs a Runtime is
the ReferenceTypePropagationTest, so we make it subclass
CommonCompilerTest explicitly and change OptimizingUnitTest
to subclass CommonArtTest for the other tests.
On host, each test takes ~220ms longer when it initializes the
Runtime. For example, the ConstantFoldingTest, which comprises
10 individual tests, previously took over 2.2s to run; without
the Runtime initialization it takes around 3-5ms. On target,
running 32-bit gtests on taimen with run-gtests.sh
(single-threaded) goes from ~28m47s to ~26m13s, a reduction
of ~9%.
Test: m test-art-host-gtest
Test: run-gtests.sh
Change-Id: I43e50ed58e52cc0ad04cdb4d39801bfbae840a3d
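Two small patterns carry most of this change. On the code generator side, both backends gain a helper that tolerates a missing Runtime in debug builds (the only builds gtests use); the arm64 hunk below reads:

    ALWAYS_INLINE static inline bool UseJitCompilation() {
      Runtime* runtime = Runtime::Current();
      // Note: There may be no Runtime for gtests; gtests use debug builds.
      return (!kIsDebugBuild || runtime != nullptr) && runtime->UseJitCompilation();
    }

On the test side, fixtures stop relying on the Runtime-backed compiler_options_ member and build their own options through the new static helper, roughly as the codegen_test.cc hunks below do:

    std::unique_ptr<CompilerOptions> compiler_options =
        CommonCompilerTest::CreateCompilerOptions(target_config.GetInstructionSet(), "default");
    RunCode(target_config, *compiler_options, graph, [](HGraph*) {}, has_result, expected);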
Diffstat (limited to 'compiler')
-rw-r--r--  compiler/common_compiler_test.cc                        | 11
-rw-r--r--  compiler/common_compiler_test.h                         |  3
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc             | 20
-rw-r--r--  compiler/optimizing/code_generator_arm_vixl.cc          | 20
-rw-r--r--  compiler/optimizing/codegen_test.cc                     | 67
-rw-r--r--  compiler/optimizing/instruction_builder.cc              |  5
-rw-r--r--  compiler/optimizing/linearize_test.cc                   |  4
-rw-r--r--  compiler/optimizing/live_ranges_test.cc                 |  5
-rw-r--r--  compiler/optimizing/liveness_test.cc                    |  6
-rw-r--r--  compiler/optimizing/loop_optimization_test.cc           |  5
-rw-r--r--  compiler/optimizing/optimizing_unit_test.h              |  2
-rw-r--r--  compiler/optimizing/reference_type_propagation.cc       |  4
-rw-r--r--  compiler/optimizing/reference_type_propagation_test.cc  |  2
-rw-r--r--  compiler/optimizing/register_allocator_test.cc          |  6
-rw-r--r--  compiler/optimizing/scheduler_test.cc                   |  5
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis_test.cc       |  4
16 files changed, 110 insertions(+), 59 deletions(-)
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 18f00e21e4..6b4dbed03b 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -44,6 +44,17 @@ namespace art {
 
+std::unique_ptr<CompilerOptions> CommonCompilerTest::CreateCompilerOptions(
+    InstructionSet instruction_set, const std::string& variant) {
+  std::unique_ptr<CompilerOptions> compiler_options = std::make_unique<CompilerOptions>();
+  compiler_options->instruction_set_ = instruction_set;
+  std::string error_msg;
+  compiler_options->instruction_set_features_ =
+      InstructionSetFeatures::FromVariant(instruction_set, variant, &error_msg);
+  CHECK(compiler_options->instruction_set_features_ != nullptr) << error_msg;
+  return compiler_options;
+}
+
 CommonCompilerTest::CommonCompilerTest() {}
 CommonCompilerTest::~CommonCompilerTest() {}
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 4f4e49a720..703e5f8523 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -44,6 +44,9 @@ template<class T> class Handle;
 
 class CommonCompilerTest : public CommonRuntimeTest {
  public:
+  static std::unique_ptr<CompilerOptions> CreateCompilerOptions(InstructionSet instruction_set,
+                                                                const std::string& variant);
+
   CommonCompilerTest();
   ~CommonCompilerTest();
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index f4e18cf7f4..11aade46f4 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -92,6 +92,12 @@ static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
 // the offset explicitly.
 constexpr uint32_t kReferenceLoadMinFarOffset = 16 * KB;
 
+ALWAYS_INLINE static inline bool UseJitCompilation() {
+  Runtime* runtime = Runtime::Current();
+  // Note: There may be no Runtime for gtests; gtests use debug builds.
+  return (!kIsDebugBuild || runtime != nullptr) && runtime->UseJitCompilation();
+}
+
 inline Condition ARM64Condition(IfCondition cond) {
   switch (cond) {
     case kCondEQ: return eq;
@@ -938,7 +944,7 @@ void CodeGeneratorARM64::Finalize(CodeAllocator* allocator) {
   EmitJumpTables();
 
   // Emit JIT baker read barrier slow paths.
-  DCHECK(Runtime::Current()->UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
+  DCHECK(UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
   for (auto& entry : jit_baker_read_barrier_slow_paths_) {
     uint32_t encoded_data = entry.first;
     vixl::aarch64::Label* slow_path_entry = &entry.second.label;
@@ -1782,7 +1788,7 @@ void CodeGeneratorARM64::InvokeRuntime(QuickEntrypointEnum entrypoint,
     // Reduce code size for AOT by using shared trampolines for slow path runtime calls across the
     // entire oat file. This adds an extra branch and we do not want to slow down the main path.
     // For JIT, thunk sharing is per-method, so the gains would be smaller or even negative.
-    if (slow_path == nullptr || Runtime::Current()->UseJitCompilation()) {
+    if (slow_path == nullptr || UseJitCompilation()) {
       __ Ldr(lr, MemOperand(tr, entrypoint_offset.Int32Value()));
       // Ensure the pc position is recorded immediately after the `blr` instruction.
       ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize);
@@ -4508,7 +4514,7 @@ vixl::aarch64::Label* CodeGeneratorARM64::NewStringBssEntryPatch(
 
 void CodeGeneratorARM64::EmitEntrypointThunkCall(ThreadOffset64 entrypoint_offset) {
   DCHECK(!__ AllowMacroInstructions());  // In ExactAssemblyScope.
-  DCHECK(!Runtime::Current()->UseJitCompilation());
+  DCHECK(!UseJitCompilation());
   call_entrypoint_patches_.emplace_back(/*dex_file*/ nullptr, entrypoint_offset.Uint32Value());
   vixl::aarch64::Label* bl_label = &call_entrypoint_patches_.back().label;
   __ bind(bl_label);
@@ -4517,7 +4523,7 @@ void CodeGeneratorARM64::EmitEntrypointThunkCall(ThreadOffset64 entrypoint_offse
 
 void CodeGeneratorARM64::EmitBakerReadBarrierCbnz(uint32_t custom_data) {
   DCHECK(!__ AllowMacroInstructions());  // In ExactAssemblyScope.
-  if (Runtime::Current()->UseJitCompilation()) {
+  if (UseJitCompilation()) {
     auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
     vixl::aarch64::Label* slow_path_entry = &it->second.label;
     __ cbnz(mr, slow_path_entry);
@@ -6656,10 +6662,8 @@ void CodeGeneratorARM64::CompileBakerReadBarrierThunk(Arm64Assembler& assembler,
   }
 
   // For JIT, the slow path is considered part of the compiled method,
-  // so JIT should pass null as `debug_name`. Tests may not have a runtime.
-  DCHECK(Runtime::Current() == nullptr ||
-         !Runtime::Current()->UseJitCompilation() ||
-         debug_name == nullptr);
+  // so JIT should pass null as `debug_name`.
+  DCHECK(!UseJitCompilation() || debug_name == nullptr);
   if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
     std::ostringstream oss;
     oss << "BakerReadBarrierThunk";
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 3a2cf40f04..44d2f4d9fd 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -93,6 +93,12 @@ constexpr uint32_t kReferenceLoadMinFarOffset = 4 * KB;
 // Using a base helps identify when we hit Marking Register check breakpoints.
 constexpr int kMarkingRegisterCheckBreakCodeBaseCode = 0x10;
 
+ALWAYS_INLINE static inline bool UseJitCompilation() {
+  Runtime* runtime = Runtime::Current();
+  // Note: There may be no Runtime for gtests which use debug builds.
+  return (!kIsDebugBuild || runtime != nullptr) && runtime->UseJitCompilation();
+}
+
 #ifdef __
 #error "ARM Codegen VIXL macro-assembler macro already defined."
 #endif
@@ -1931,7 +1937,7 @@ void CodeGeneratorARMVIXL::Finalize(CodeAllocator* allocator) {
   FixJumpTables();
 
   // Emit JIT baker read barrier slow paths.
-  DCHECK(Runtime::Current()->UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
+  DCHECK(UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
   for (auto& entry : jit_baker_read_barrier_slow_paths_) {
     uint32_t encoded_data = entry.first;
     vixl::aarch32::Label* slow_path_entry = &entry.second.label;
@@ -2511,7 +2517,7 @@ void CodeGeneratorARMVIXL::InvokeRuntime(QuickEntrypointEnum entrypoint,
     // Reduce code size for AOT by using shared trampolines for slow path runtime calls across the
     // entire oat file. This adds an extra branch and we do not want to slow down the main path.
     // For JIT, thunk sharing is per-method, so the gains would be smaller or even negative.
-    if (slow_path == nullptr || Runtime::Current()->UseJitCompilation()) {
+    if (slow_path == nullptr || UseJitCompilation()) {
       __ Ldr(lr, MemOperand(tr, entrypoint_offset.Int32Value()));
       // Ensure the pc position is recorded immediately after the `blx` instruction.
       // blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
@@ -9070,7 +9076,7 @@ CodeGeneratorARMVIXL::PcRelativePatchInfo* CodeGeneratorARMVIXL::NewPcRelativePa
 
 void CodeGeneratorARMVIXL::EmitEntrypointThunkCall(ThreadOffset32 entrypoint_offset) {
   DCHECK(!__ AllowMacroInstructions());  // In ExactAssemblyScope.
-  DCHECK(!Runtime::Current()->UseJitCompilation());
+  DCHECK(!UseJitCompilation());
   call_entrypoint_patches_.emplace_back(/*dex_file*/ nullptr, entrypoint_offset.Uint32Value());
   vixl::aarch32::Label* bl_label = &call_entrypoint_patches_.back().label;
   __ bind(bl_label);
@@ -9081,7 +9087,7 @@ void CodeGeneratorARMVIXL::EmitEntrypointThunkCall(ThreadOffset32 entrypoint_off
 
 void CodeGeneratorARMVIXL::EmitBakerReadBarrierBne(uint32_t custom_data) {
   DCHECK(!__ AllowMacroInstructions());  // In ExactAssemblyScope.
-  if (Runtime::Current()->UseJitCompilation()) {
+  if (UseJitCompilation()) {
     auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
     vixl::aarch32::Label* slow_path_entry = &it->second.label;
     __ b(ne, EncodingSize(Wide), slow_path_entry);
@@ -9710,10 +9716,8 @@ void CodeGeneratorARMVIXL::CompileBakerReadBarrierThunk(ArmVIXLAssembler& assemb
   }
 
   // For JIT, the slow path is considered part of the compiled method,
-  // so JIT should pass null as `debug_name`. Tests may not have a runtime.
-  DCHECK(Runtime::Current() == nullptr ||
-         !Runtime::Current()->UseJitCompilation() ||
-         debug_name == nullptr);
+  // so JIT should pass null as `debug_name`.
+  DCHECK(!UseJitCompilation() || debug_name == nullptr);
   if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
     std::ostringstream oss;
     oss << "BakerReadBarrierThunk";
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index d9b4f79e8b..f4f44a0479 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -81,8 +81,9 @@ void CodegenTest::TestCode(const std::vector<uint16_t>& data, bool has_result, i
     HGraph* graph = CreateCFG(data);
     // Remove suspend checks, they cannot be executed in this context.
     RemoveSuspendChecks(graph);
-    OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
-    RunCode(target_config, *compiler_options_, graph, [](HGraph*) {}, has_result, expected);
+    std::unique_ptr<CompilerOptions> compiler_options =
+        CommonCompilerTest::CreateCompilerOptions(target_config.GetInstructionSet(), "default");
+    RunCode(target_config, *compiler_options, graph, [](HGraph*) {}, has_result, expected);
   }
 }
 
@@ -93,8 +94,9 @@ void CodegenTest::TestCodeLong(const std::vector<uint16_t>& data,
     HGraph* graph = CreateCFG(data, DataType::Type::kInt64);
     // Remove suspend checks, they cannot be executed in this context.
     RemoveSuspendChecks(graph);
-    OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
-    RunCode(target_config, *compiler_options_, graph, [](HGraph*) {}, has_result, expected);
+    std::unique_ptr<CompilerOptions> compiler_options =
+        CommonCompilerTest::CreateCompilerOptions(target_config.GetInstructionSet(), "default");
+    RunCode(target_config, *compiler_options, graph, [](HGraph*) {}, has_result, expected);
   }
 }
 
@@ -445,7 +447,9 @@ TEST_F(CodegenTest, NonMaterializedCondition) {
     ASSERT_FALSE(equal->IsEmittedAtUseSite());
     graph->BuildDominatorTree();
-    PrepareForRegisterAllocation(graph, *compiler_options_).Run();
+    std::unique_ptr<CompilerOptions> compiler_options =
+        CommonCompilerTest::CreateCompilerOptions(target_config.GetInstructionSet(), "default");
+    PrepareForRegisterAllocation(graph, *compiler_options).Run();
     ASSERT_TRUE(equal->IsEmittedAtUseSite());
 
     auto hook_before_codegen = [](HGraph* graph_in) {
@@ -454,8 +458,7 @@ TEST_F(CodegenTest, NonMaterializedCondition) {
       block->InsertInstructionBefore(move, block->GetLastInstruction());
     };
 
-    OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
-    RunCode(target_config, *compiler_options_, graph, hook_before_codegen, true, 0);
+    RunCode(target_config, *compiler_options, graph, hook_before_codegen, true, 0);
   }
 }
 
@@ -501,8 +504,9 @@ TEST_F(CodegenTest, MaterializedCondition1) {
          new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
      block->InsertInstructionBefore(move, block->GetLastInstruction());
    };
-    OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
-    RunCode(target_config, *compiler_options_, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+    std::unique_ptr<CompilerOptions> compiler_options =
+        CommonCompilerTest::CreateCompilerOptions(target_config.GetInstructionSet(), "default");
+    RunCode(target_config, *compiler_options, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
   }
 }
 
@@ -569,8 +573,9 @@ TEST_F(CodegenTest, MaterializedCondition2) {
          new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
      block->InsertInstructionBefore(move, block->GetLastInstruction());
    };
-    OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
-    RunCode(target_config, *compiler_options_, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+    std::unique_ptr<CompilerOptions> compiler_options =
+        CommonCompilerTest::CreateCompilerOptions(target_config.GetInstructionSet(), "default");
+    RunCode(target_config, *compiler_options, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
   }
 }
 
@@ -679,8 +684,9 @@ void CodegenTest::TestComparison(IfCondition condition,
   block->AddInstruction(new (GetAllocator()) HReturn(comparison));
 
   graph->BuildDominatorTree();
-  OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
-  RunCode(target_config, *compiler_options_, graph, [](HGraph*) {}, true, expected_result);
+  std::unique_ptr<CompilerOptions> compiler_options =
+      CommonCompilerTest::CreateCompilerOptions(target_config.GetInstructionSet(), "default");
+  RunCode(target_config, *compiler_options, graph, [](HGraph*) {}, true, expected_result);
 }
 
 TEST_F(CodegenTest, ComparisonsInt) {
@@ -711,9 +717,10 @@ TEST_F(CodegenTest, ComparisonsLong) {
 
 #ifdef ART_ENABLE_CODEGEN_arm
 TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) {
-  OverrideInstructionSetFeatures(InstructionSet::kThumb2, "default");
+  std::unique_ptr<CompilerOptions> compiler_options =
+      CommonCompilerTest::CreateCompilerOptions(InstructionSet::kThumb2, "default");
   HGraph* graph = CreateGraph();
-  arm::CodeGeneratorARMVIXL codegen(graph, *compiler_options_);
+  arm::CodeGeneratorARMVIXL codegen(graph, *compiler_options);
 
   codegen.Initialize();
 
@@ -734,9 +741,10 @@ TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) {
 #ifdef ART_ENABLE_CODEGEN_arm64
 // Regression test for b/34760542.
 TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) {
-  OverrideInstructionSetFeatures(InstructionSet::kArm64, "default");
+  std::unique_ptr<CompilerOptions> compiler_options =
+      CommonCompilerTest::CreateCompilerOptions(InstructionSet::kArm64, "default");
   HGraph* graph = CreateGraph();
-  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options);
 
   codegen.Initialize();
 
@@ -783,9 +791,10 @@ TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) {
 
 // Check that ParallelMoveResolver works fine for ARM64 for both cases when SIMD is on and off.
 TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) {
-  OverrideInstructionSetFeatures(InstructionSet::kArm64, "default");
+  std::unique_ptr<CompilerOptions> compiler_options =
+      CommonCompilerTest::CreateCompilerOptions(InstructionSet::kArm64, "default");
   HGraph* graph = CreateGraph();
-  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options);
 
   codegen.Initialize();
 
@@ -818,9 +827,10 @@ TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) {
 
 // Check that ART ISA Features are propagated to VIXL for arm64 (using cortex-a75 as example).
 TEST_F(CodegenTest, ARM64IsaVIXLFeaturesA75) {
-  OverrideInstructionSetFeatures(InstructionSet::kArm64, "cortex-a75");
+  std::unique_ptr<CompilerOptions> compiler_options =
+      CommonCompilerTest::CreateCompilerOptions(InstructionSet::kArm64, "cortex-a75");
   HGraph* graph = CreateGraph();
-  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options);
 
   vixl::CPUFeatures* features = codegen.GetVIXLAssembler()->GetCPUFeatures();
   EXPECT_TRUE(features->Has(vixl::CPUFeatures::kCRC32));
@@ -832,9 +842,10 @@ TEST_F(CodegenTest, ARM64IsaVIXLFeaturesA75) {
 
 // Check that ART ISA Features are propagated to VIXL for arm64 (using cortex-a53 as example).
 TEST_F(CodegenTest, ARM64IsaVIXLFeaturesA53) {
-  OverrideInstructionSetFeatures(InstructionSet::kArm64, "cortex-a53");
+  std::unique_ptr<CompilerOptions> compiler_options =
+      CommonCompilerTest::CreateCompilerOptions(InstructionSet::kArm64, "cortex-a53");
   HGraph* graph = CreateGraph();
-  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options);
 
   vixl::CPUFeatures* features = codegen.GetVIXLAssembler()->GetCPUFeatures();
   EXPECT_TRUE(features->Has(vixl::CPUFeatures::kCRC32));
@@ -850,9 +861,10 @@ constexpr static size_t kExpectedFPSpillSize = 8 * vixl::aarch64::kDRegSizeInByt
 // allocated on stack per callee-saved FP register to be preserved in the frame entry as
 // ABI states.
 TEST_F(CodegenTest, ARM64FrameSizeSIMD) {
-  OverrideInstructionSetFeatures(InstructionSet::kArm64, "default");
+  std::unique_ptr<CompilerOptions> compiler_options =
+      CommonCompilerTest::CreateCompilerOptions(InstructionSet::kArm64, "default");
   HGraph* graph = CreateGraph();
-  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options);
   codegen.Initialize();
 
   graph->SetHasSIMD(true);
@@ -869,9 +881,10 @@ TEST_F(CodegenTest, ARM64FrameSizeSIMD) {
 }
 
 TEST_F(CodegenTest, ARM64FrameSizeNoSIMD) {
-  OverrideInstructionSetFeatures(InstructionSet::kArm64, "default");
+  std::unique_ptr<CompilerOptions> compiler_options =
+      CommonCompilerTest::CreateCompilerOptions(InstructionSet::kArm64, "default");
   HGraph* graph = CreateGraph();
-  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+  arm64::CodeGeneratorARM64 codegen(graph, *compiler_options);
   codegen.Initialize();
 
   graph->SetHasSIMD(false);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 768bc2465c..69f67780f1 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -380,13 +380,14 @@ bool HInstructionBuilder::Build() {
       AppendInstruction(new (allocator_) HNativeDebugInfo(dex_pc));
     }
 
-    DCHECK(!Thread::Current()->IsExceptionPending())
+    // Note: There may be no Thread for gtests.
+    DCHECK(Thread::Current() == nullptr || !Thread::Current()->IsExceptionPending())
        << dex_file_->PrettyMethod(dex_compilation_unit_->GetDexMethodIndex())
        << " " << pair.Inst().Name() << "@" << dex_pc;
     if (!ProcessDexInstruction(pair.Inst(), dex_pc, quicken_index)) {
       return false;
     }
-    DCHECK(!Thread::Current()->IsExceptionPending())
+    DCHECK(Thread::Current() == nullptr || !Thread::Current()->IsExceptionPending())
        << dex_file_->PrettyMethod(dex_compilation_unit_->GetDexMethodIndex())
        << " " << pair.Inst().Name() << "@" << dex_pc;
 
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index 50bfe843b5..d56ae11ca9 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -41,7 +41,9 @@ template <size_t number_of_blocks>
 void LinearizeTest::TestCode(const std::vector<uint16_t>& data,
                              const uint32_t (&expected_order)[number_of_blocks]) {
   HGraph* graph = CreateCFG(data);
-  std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options_);
+  std::unique_ptr<CompilerOptions> compiler_options =
+      CommonCompilerTest::CreateCompilerOptions(kRuntimeISA, "default");
+  std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options);
   SsaLivenessAnalysis liveness(graph, codegen.get(), GetScopedAllocator());
   liveness.Analyze();
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 60f513ca48..bb8a4dc08e 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -28,12 +28,15 @@ namespace art {
 
 class LiveRangesTest : public OptimizingUnitTest {
- public:
+ protected:
   HGraph* BuildGraph(const std::vector<uint16_t>& data);
+
+  std::unique_ptr<CompilerOptions> compiler_options_;
 };
 
 HGraph* LiveRangesTest::BuildGraph(const std::vector<uint16_t>& data) {
   HGraph* graph = CreateCFG(data);
+  compiler_options_ = CommonCompilerTest::CreateCompilerOptions(kRuntimeISA, "default");
   // Suspend checks implementation may change in the future, and this test relies
   // on how instructions are ordered.
   RemoveSuspendChecks(graph);
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index f11f7a9779..ba3787e9be 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -47,8 +47,10 @@ static void DumpBitVector(BitVector* vector,
 void LivenessTest::TestCode(const std::vector<uint16_t>& data, const char* expected) {
   HGraph* graph = CreateCFG(data);
   // `Inline` conditions into ifs.
-  PrepareForRegisterAllocation(graph, *compiler_options_).Run();
-  std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options_);
+  std::unique_ptr<CompilerOptions> compiler_options =
+      CommonCompilerTest::CreateCompilerOptions(kRuntimeISA, "default");
+  PrepareForRegisterAllocation(graph, *compiler_options).Run();
+  std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options);
   SsaLivenessAnalysis liveness(graph, codegen.get(), GetScopedAllocator());
   liveness.Analyze();
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index 8b4d58eaae..bda25283f5 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -15,6 +15,7 @@
  */
 
 #include "code_generator.h"
+#include "driver/compiler_options.h"
 #include "loop_optimization.h"
 #include "optimizing_unit_test.h"
 
@@ -28,12 +29,12 @@ namespace art {
 class LoopOptimizationTest : public OptimizingUnitTest {
  protected:
   void SetUp() override {
-    OverrideInstructionSetFeatures(instruction_set_, "default");
     OptimizingUnitTest::SetUp();
 
     graph_ = CreateGraph();
     BuildGraph();
     iva_ = new (GetAllocator()) HInductionVarAnalysis(graph_);
+    compiler_options_ = CommonCompilerTest::CreateCompilerOptions(kRuntimeISA, "default");
     DCHECK(compiler_options_ != nullptr);
     codegen_ = CodeGenerator::Create(graph_, *compiler_options_);
     DCHECK(codegen_.get() != nullptr);
@@ -43,6 +44,7 @@ class LoopOptimizationTest : public OptimizingUnitTest {
 
   void TearDown() override {
     codegen_.reset();
+    compiler_options_.reset();
     graph_ = nullptr;
     ResetPoolAndAllocator();
     OptimizingUnitTest::TearDown();
@@ -117,6 +119,7 @@ class LoopOptimizationTest : public OptimizingUnitTest {
 
   // General building fields.
   HGraph* graph_;
+  std::unique_ptr<CompilerOptions> compiler_options_;
   std::unique_ptr<CodeGenerator> codegen_;
   HInductionVarAnalysis* iva_;
   HLoopOptimization* loop_opt_;
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 61e16800e5..c884f5b0a4 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -266,7 +266,7 @@ class OptimizingUnitTestHelper {
   std::vector<HInstruction*> parameters_;
 };
 
-class OptimizingUnitTest : public CommonCompilerTest, public OptimizingUnitTestHelper {};
+class OptimizingUnitTest : public CommonArtTest, public OptimizingUnitTestHelper {};
 
 // Naive string diff data type.
 typedef std::list<std::pair<std::string, std::string>> diff_t;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index ff1e01ba4c..85769437ba 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -126,8 +126,8 @@ ReferenceTypePropagation::ReferenceTypePropagation(HGraph* graph,
 }
 
 void ReferenceTypePropagation::ValidateTypes() {
-  // TODO: move this to the graph checker.
-  if (kIsDebugBuild) {
+  // TODO: move this to the graph checker. Note: There may be no Thread for gtests.
+  if (kIsDebugBuild && Thread::Current() != nullptr) {
     ScopedObjectAccess soa(Thread::Current());
     for (HBasicBlock* block : graph_->GetReversePostOrder()) {
       for (HInstructionIterator iti(block->GetInstructions()); !iti.Done(); iti.Advance()) {
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index 01f0dd3f2b..a0d66093d2 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -28,7 +28,7 @@ namespace art {
  * Fixture class for unit testing the ReferenceTypePropagation phase. Used to verify the
  * functionality of methods and situations that are hard to set up with checker tests.
  */
-class ReferenceTypePropagationTest : public OptimizingUnitTest {
+class ReferenceTypePropagationTest : public CommonCompilerTest, public OptimizingUnitTestHelper {
  public:
   ReferenceTypePropagationTest() : graph_(nullptr), propagation_(nullptr) { }
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 79eb082cd7..d1db40be82 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -41,9 +41,9 @@ using Strategy = RegisterAllocator::Strategy;
 class RegisterAllocatorTest : public OptimizingUnitTest {
  protected:
   void SetUp() override {
-    // This test is using the x86 ISA.
-    OverrideInstructionSetFeatures(InstructionSet::kX86, "default");
     OptimizingUnitTest::SetUp();
+    // This test is using the x86 ISA.
+    compiler_options_ = CommonCompilerTest::CreateCompilerOptions(InstructionSet::kX86, "default");
   }
 
   // These functions need to access private variables of LocationSummary, so we declare it
@@ -74,6 +74,8 @@ class RegisterAllocatorTest : public OptimizingUnitTest {
                                           /* processing_core_registers= */ true,
                                           /* log_fatal_on_failure= */ false);
   }
+
+  std::unique_ptr<CompilerOptions> compiler_options_;
 };
 
 // This macro should include all register allocation strategies that should be tested.
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 7835b1d3d3..b5ec93ea6c 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -188,9 +188,10 @@ class SchedulerTest : public OptimizingUnitTest {
     HInstructionScheduling scheduling(graph, target_config.GetInstructionSet());
     scheduling.Run(/*only_optimize_loop_blocks*/ false, /*schedule_randomly*/ true);
 
-    OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
+    std::unique_ptr<CompilerOptions> compiler_options =
+        CommonCompilerTest::CreateCompilerOptions(target_config.GetInstructionSet(), "default");
     RunCode(target_config,
-            *compiler_options_,
+            *compiler_options,
             graph,
             [](HGraph* graph_arg) { RemoveSuspendChecks(graph_arg); },
             has_result, expected);
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index 352c44f63a..a477893d57 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -32,8 +32,9 @@ class SsaLivenessAnalysisTest : public OptimizingUnitTest {
   void SetUp() override {
     OptimizingUnitTest::SetUp();
     graph_ = CreateGraph();
+    compiler_options_ = CommonCompilerTest::CreateCompilerOptions(kRuntimeISA, "default");
     codegen_ = CodeGenerator::Create(graph_, *compiler_options_);
-    CHECK(codegen_ != nullptr) << instruction_set_ << " is not a supported target architecture.";
+    CHECK(codegen_ != nullptr);
     // Create entry block.
     entry_ = new (GetAllocator()) HBasicBlock(graph_);
     graph_->AddBlock(entry_);
@@ -50,6 +51,7 @@ class SsaLivenessAnalysisTest : public OptimizingUnitTest {
   }
 
   HGraph* graph_;
+  std::unique_ptr<CompilerOptions> compiler_options_;
   std::unique_ptr<CodeGenerator> codegen_;
   HBasicBlock* entry_;
 };