Optimizing: Run gtests without creating the Runtime.
The only Optimizing test that actually needs a Runtime is
the ReferenceTypePropagationTest, so we make it subclass
CommonCompilerTest explicitly and change OptimizingUnitTest
to subclass CommonArtTest for the other tests.
On host, each test that initializes the Runtime takes ~220ms
more than without initializing the Runtime. For example, the
ConstantFoldingTest, which has 10 individual tests, previously
took over 2.2s to run, but without the Runtime initialization
it takes around 3-5ms. On target, running 32-bit gtests on
taimen with run-gtests.sh (single-threaded) goes from
~28m47s to ~26m13s, a reduction of ~9%.
Test: m test-art-host-gtest
Test: run-gtests.sh
Change-Id: I43e50ed58e52cc0ad04cdb4d39801bfbae840a3d
diff --git a/compiler/common_compiler_test.cc b/compiler/common_compiler_test.cc
index 18f00e2..6b4dbed 100644
--- a/compiler/common_compiler_test.cc
+++ b/compiler/common_compiler_test.cc
@@ -44,6 +44,17 @@
namespace art {
+std::unique_ptr<CompilerOptions> CommonCompilerTest::CreateCompilerOptions(
+ InstructionSet instruction_set, const std::string& variant) {
+ std::unique_ptr<CompilerOptions> compiler_options = std::make_unique<CompilerOptions>();
+ compiler_options->instruction_set_ = instruction_set;
+ std::string error_msg;
+ compiler_options->instruction_set_features_ =
+ InstructionSetFeatures::FromVariant(instruction_set, variant, &error_msg);
+ CHECK(compiler_options->instruction_set_features_ != nullptr) << error_msg;
+ return compiler_options;
+}
+
CommonCompilerTest::CommonCompilerTest() {}
CommonCompilerTest::~CommonCompilerTest() {}
diff --git a/compiler/common_compiler_test.h b/compiler/common_compiler_test.h
index 4f4e49a..703e5f8 100644
--- a/compiler/common_compiler_test.h
+++ b/compiler/common_compiler_test.h
@@ -44,6 +44,9 @@
class CommonCompilerTest : public CommonRuntimeTest {
public:
+ static std::unique_ptr<CompilerOptions> CreateCompilerOptions(InstructionSet instruction_set,
+ const std::string& variant);
+
CommonCompilerTest();
~CommonCompilerTest();
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index f4e18cf..11aade4 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -92,6 +92,12 @@
// the offset explicitly.
constexpr uint32_t kReferenceLoadMinFarOffset = 16 * KB;
+ALWAYS_INLINE static inline bool UseJitCompilation() {
+ Runtime* runtime = Runtime::Current();
+ // Note: There may be no Runtime for gtests; gtests use debug builds.
+ return (!kIsDebugBuild || runtime != nullptr) && runtime->UseJitCompilation();
+}
+
inline Condition ARM64Condition(IfCondition cond) {
switch (cond) {
case kCondEQ: return eq;
@@ -938,7 +944,7 @@
EmitJumpTables();
// Emit JIT baker read barrier slow paths.
- DCHECK(Runtime::Current()->UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
+ DCHECK(UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
for (auto& entry : jit_baker_read_barrier_slow_paths_) {
uint32_t encoded_data = entry.first;
vixl::aarch64::Label* slow_path_entry = &entry.second.label;
@@ -1782,7 +1788,7 @@
// Reduce code size for AOT by using shared trampolines for slow path runtime calls across the
// entire oat file. This adds an extra branch and we do not want to slow down the main path.
// For JIT, thunk sharing is per-method, so the gains would be smaller or even negative.
- if (slow_path == nullptr || Runtime::Current()->UseJitCompilation()) {
+ if (slow_path == nullptr || UseJitCompilation()) {
__ Ldr(lr, MemOperand(tr, entrypoint_offset.Int32Value()));
// Ensure the pc position is recorded immediately after the `blr` instruction.
ExactAssemblyScope eas(GetVIXLAssembler(), kInstructionSize, CodeBufferCheckScope::kExactSize);
@@ -4508,7 +4514,7 @@
void CodeGeneratorARM64::EmitEntrypointThunkCall(ThreadOffset64 entrypoint_offset) {
DCHECK(!__ AllowMacroInstructions()); // In ExactAssemblyScope.
- DCHECK(!Runtime::Current()->UseJitCompilation());
+ DCHECK(!UseJitCompilation());
call_entrypoint_patches_.emplace_back(/*dex_file*/ nullptr, entrypoint_offset.Uint32Value());
vixl::aarch64::Label* bl_label = &call_entrypoint_patches_.back().label;
__ bind(bl_label);
@@ -4517,7 +4523,7 @@
void CodeGeneratorARM64::EmitBakerReadBarrierCbnz(uint32_t custom_data) {
DCHECK(!__ AllowMacroInstructions()); // In ExactAssemblyScope.
- if (Runtime::Current()->UseJitCompilation()) {
+ if (UseJitCompilation()) {
auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
vixl::aarch64::Label* slow_path_entry = &it->second.label;
__ cbnz(mr, slow_path_entry);
@@ -6656,10 +6662,8 @@
}
// For JIT, the slow path is considered part of the compiled method,
- // so JIT should pass null as `debug_name`. Tests may not have a runtime.
- DCHECK(Runtime::Current() == nullptr ||
- !Runtime::Current()->UseJitCompilation() ||
- debug_name == nullptr);
+ // so JIT should pass null as `debug_name`.
+ DCHECK(!UseJitCompilation() || debug_name == nullptr);
if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
std::ostringstream oss;
oss << "BakerReadBarrierThunk";
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 3a2cf40..44d2f4d 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -93,6 +93,12 @@
// Using a base helps identify when we hit Marking Register check breakpoints.
constexpr int kMarkingRegisterCheckBreakCodeBaseCode = 0x10;
+ALWAYS_INLINE static inline bool UseJitCompilation() {
+ Runtime* runtime = Runtime::Current();
+ // Note: There may be no Runtime for gtests which use debug builds.
+ return (!kIsDebugBuild || runtime != nullptr) && runtime->UseJitCompilation();
+}
+
#ifdef __
#error "ARM Codegen VIXL macro-assembler macro already defined."
#endif
@@ -1931,7 +1937,7 @@
FixJumpTables();
// Emit JIT baker read barrier slow paths.
- DCHECK(Runtime::Current()->UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
+ DCHECK(UseJitCompilation() || jit_baker_read_barrier_slow_paths_.empty());
for (auto& entry : jit_baker_read_barrier_slow_paths_) {
uint32_t encoded_data = entry.first;
vixl::aarch32::Label* slow_path_entry = &entry.second.label;
@@ -2511,7 +2517,7 @@
// Reduce code size for AOT by using shared trampolines for slow path runtime calls across the
// entire oat file. This adds an extra branch and we do not want to slow down the main path.
// For JIT, thunk sharing is per-method, so the gains would be smaller or even negative.
- if (slow_path == nullptr || Runtime::Current()->UseJitCompilation()) {
+ if (slow_path == nullptr || UseJitCompilation()) {
__ Ldr(lr, MemOperand(tr, entrypoint_offset.Int32Value()));
// Ensure the pc position is recorded immediately after the `blx` instruction.
// blx in T32 has only 16bit encoding that's why a stricter check for the scope is used.
@@ -9070,7 +9076,7 @@
void CodeGeneratorARMVIXL::EmitEntrypointThunkCall(ThreadOffset32 entrypoint_offset) {
DCHECK(!__ AllowMacroInstructions()); // In ExactAssemblyScope.
- DCHECK(!Runtime::Current()->UseJitCompilation());
+ DCHECK(!UseJitCompilation());
call_entrypoint_patches_.emplace_back(/*dex_file*/ nullptr, entrypoint_offset.Uint32Value());
vixl::aarch32::Label* bl_label = &call_entrypoint_patches_.back().label;
__ bind(bl_label);
@@ -9081,7 +9087,7 @@
void CodeGeneratorARMVIXL::EmitBakerReadBarrierBne(uint32_t custom_data) {
DCHECK(!__ AllowMacroInstructions()); // In ExactAssemblyScope.
- if (Runtime::Current()->UseJitCompilation()) {
+ if (UseJitCompilation()) {
auto it = jit_baker_read_barrier_slow_paths_.FindOrAdd(custom_data);
vixl::aarch32::Label* slow_path_entry = &it->second.label;
__ b(ne, EncodingSize(Wide), slow_path_entry);
@@ -9710,10 +9716,8 @@
}
// For JIT, the slow path is considered part of the compiled method,
- // so JIT should pass null as `debug_name`. Tests may not have a runtime.
- DCHECK(Runtime::Current() == nullptr ||
- !Runtime::Current()->UseJitCompilation() ||
- debug_name == nullptr);
+ // so JIT should pass null as `debug_name`.
+ DCHECK(!UseJitCompilation() || debug_name == nullptr);
if (debug_name != nullptr && GetCompilerOptions().GenerateAnyDebugInfo()) {
std::ostringstream oss;
oss << "BakerReadBarrierThunk";
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index d9b4f79..f4f44a0 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -81,8 +81,9 @@
HGraph* graph = CreateCFG(data);
// Remove suspend checks, they cannot be executed in this context.
RemoveSuspendChecks(graph);
- OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
- RunCode(target_config, *compiler_options_, graph, [](HGraph*) {}, has_result, expected);
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(target_config.GetInstructionSet(), "default");
+ RunCode(target_config, *compiler_options, graph, [](HGraph*) {}, has_result, expected);
}
}
@@ -93,8 +94,9 @@
HGraph* graph = CreateCFG(data, DataType::Type::kInt64);
// Remove suspend checks, they cannot be executed in this context.
RemoveSuspendChecks(graph);
- OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
- RunCode(target_config, *compiler_options_, graph, [](HGraph*) {}, has_result, expected);
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(target_config.GetInstructionSet(), "default");
+ RunCode(target_config, *compiler_options, graph, [](HGraph*) {}, has_result, expected);
}
}
@@ -445,7 +447,9 @@
ASSERT_FALSE(equal->IsEmittedAtUseSite());
graph->BuildDominatorTree();
- PrepareForRegisterAllocation(graph, *compiler_options_).Run();
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(target_config.GetInstructionSet(), "default");
+ PrepareForRegisterAllocation(graph, *compiler_options).Run();
ASSERT_TRUE(equal->IsEmittedAtUseSite());
auto hook_before_codegen = [](HGraph* graph_in) {
@@ -454,8 +458,7 @@
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
- OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
- RunCode(target_config, *compiler_options_, graph, hook_before_codegen, true, 0);
+ RunCode(target_config, *compiler_options, graph, hook_before_codegen, true, 0);
}
}
@@ -501,8 +504,9 @@
new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
- OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
- RunCode(target_config, *compiler_options_, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(target_config.GetInstructionSet(), "default");
+ RunCode(target_config, *compiler_options, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
}
}
}
@@ -569,8 +573,9 @@
new (graph_in->GetAllocator()) HParallelMove(graph_in->GetAllocator());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
- OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
- RunCode(target_config, *compiler_options_, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(target_config.GetInstructionSet(), "default");
+ RunCode(target_config, *compiler_options, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
}
}
}
@@ -679,8 +684,9 @@
block->AddInstruction(new (GetAllocator()) HReturn(comparison));
graph->BuildDominatorTree();
- OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
- RunCode(target_config, *compiler_options_, graph, [](HGraph*) {}, true, expected_result);
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(target_config.GetInstructionSet(), "default");
+ RunCode(target_config, *compiler_options, graph, [](HGraph*) {}, true, expected_result);
}
TEST_F(CodegenTest, ComparisonsInt) {
@@ -711,9 +717,10 @@
#ifdef ART_ENABLE_CODEGEN_arm
TEST_F(CodegenTest, ARMVIXLParallelMoveResolver) {
- OverrideInstructionSetFeatures(InstructionSet::kThumb2, "default");
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(InstructionSet::kThumb2, "default");
HGraph* graph = CreateGraph();
- arm::CodeGeneratorARMVIXL codegen(graph, *compiler_options_);
+ arm::CodeGeneratorARMVIXL codegen(graph, *compiler_options);
codegen.Initialize();
@@ -734,9 +741,10 @@
#ifdef ART_ENABLE_CODEGEN_arm64
// Regression test for b/34760542.
TEST_F(CodegenTest, ARM64ParallelMoveResolverB34760542) {
- OverrideInstructionSetFeatures(InstructionSet::kArm64, "default");
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(InstructionSet::kArm64, "default");
HGraph* graph = CreateGraph();
- arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+ arm64::CodeGeneratorARM64 codegen(graph, *compiler_options);
codegen.Initialize();
@@ -783,9 +791,10 @@
// Check that ParallelMoveResolver works fine for ARM64 for both cases when SIMD is on and off.
TEST_F(CodegenTest, ARM64ParallelMoveResolverSIMD) {
- OverrideInstructionSetFeatures(InstructionSet::kArm64, "default");
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(InstructionSet::kArm64, "default");
HGraph* graph = CreateGraph();
- arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+ arm64::CodeGeneratorARM64 codegen(graph, *compiler_options);
codegen.Initialize();
@@ -818,9 +827,10 @@
// Check that ART ISA Features are propagated to VIXL for arm64 (using cortex-a75 as example).
TEST_F(CodegenTest, ARM64IsaVIXLFeaturesA75) {
- OverrideInstructionSetFeatures(InstructionSet::kArm64, "cortex-a75");
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(InstructionSet::kArm64, "cortex-a75");
HGraph* graph = CreateGraph();
- arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+ arm64::CodeGeneratorARM64 codegen(graph, *compiler_options);
vixl::CPUFeatures* features = codegen.GetVIXLAssembler()->GetCPUFeatures();
EXPECT_TRUE(features->Has(vixl::CPUFeatures::kCRC32));
@@ -832,9 +842,10 @@
// Check that ART ISA Features are propagated to VIXL for arm64 (using cortex-a53 as example).
TEST_F(CodegenTest, ARM64IsaVIXLFeaturesA53) {
- OverrideInstructionSetFeatures(InstructionSet::kArm64, "cortex-a53");
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(InstructionSet::kArm64, "cortex-a53");
HGraph* graph = CreateGraph();
- arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+ arm64::CodeGeneratorARM64 codegen(graph, *compiler_options);
vixl::CPUFeatures* features = codegen.GetVIXLAssembler()->GetCPUFeatures();
EXPECT_TRUE(features->Has(vixl::CPUFeatures::kCRC32));
@@ -850,9 +861,10 @@
// allocated on stack per callee-saved FP register to be preserved in the frame entry as
// ABI states.
TEST_F(CodegenTest, ARM64FrameSizeSIMD) {
- OverrideInstructionSetFeatures(InstructionSet::kArm64, "default");
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(InstructionSet::kArm64, "default");
HGraph* graph = CreateGraph();
- arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+ arm64::CodeGeneratorARM64 codegen(graph, *compiler_options);
codegen.Initialize();
graph->SetHasSIMD(true);
@@ -869,9 +881,10 @@
}
TEST_F(CodegenTest, ARM64FrameSizeNoSIMD) {
- OverrideInstructionSetFeatures(InstructionSet::kArm64, "default");
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(InstructionSet::kArm64, "default");
HGraph* graph = CreateGraph();
- arm64::CodeGeneratorARM64 codegen(graph, *compiler_options_);
+ arm64::CodeGeneratorARM64 codegen(graph, *compiler_options);
codegen.Initialize();
graph->SetHasSIMD(false);
diff --git a/compiler/optimizing/instruction_builder.cc b/compiler/optimizing/instruction_builder.cc
index 768bc24..69f6778 100644
--- a/compiler/optimizing/instruction_builder.cc
+++ b/compiler/optimizing/instruction_builder.cc
@@ -380,13 +380,14 @@
AppendInstruction(new (allocator_) HNativeDebugInfo(dex_pc));
}
- DCHECK(!Thread::Current()->IsExceptionPending())
+ // Note: There may be no Thread for gtests.
+ DCHECK(Thread::Current() == nullptr || !Thread::Current()->IsExceptionPending())
<< dex_file_->PrettyMethod(dex_compilation_unit_->GetDexMethodIndex())
<< " " << pair.Inst().Name() << "@" << dex_pc;
if (!ProcessDexInstruction(pair.Inst(), dex_pc, quicken_index)) {
return false;
}
- DCHECK(!Thread::Current()->IsExceptionPending())
+ DCHECK(Thread::Current() == nullptr || !Thread::Current()->IsExceptionPending())
<< dex_file_->PrettyMethod(dex_compilation_unit_->GetDexMethodIndex())
<< " " << pair.Inst().Name() << "@" << dex_pc;
diff --git a/compiler/optimizing/linearize_test.cc b/compiler/optimizing/linearize_test.cc
index 50bfe84..d56ae11 100644
--- a/compiler/optimizing/linearize_test.cc
+++ b/compiler/optimizing/linearize_test.cc
@@ -41,7 +41,9 @@
void LinearizeTest::TestCode(const std::vector<uint16_t>& data,
const uint32_t (&expected_order)[number_of_blocks]) {
HGraph* graph = CreateCFG(data);
- std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options_);
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(kRuntimeISA, "default");
+ std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options);
SsaLivenessAnalysis liveness(graph, codegen.get(), GetScopedAllocator());
liveness.Analyze();
diff --git a/compiler/optimizing/live_ranges_test.cc b/compiler/optimizing/live_ranges_test.cc
index 60f513c..bb8a4dc 100644
--- a/compiler/optimizing/live_ranges_test.cc
+++ b/compiler/optimizing/live_ranges_test.cc
@@ -28,12 +28,15 @@
namespace art {
class LiveRangesTest : public OptimizingUnitTest {
- public:
+ protected:
HGraph* BuildGraph(const std::vector<uint16_t>& data);
+
+ std::unique_ptr<CompilerOptions> compiler_options_;
};
HGraph* LiveRangesTest::BuildGraph(const std::vector<uint16_t>& data) {
HGraph* graph = CreateCFG(data);
+ compiler_options_ = CommonCompilerTest::CreateCompilerOptions(kRuntimeISA, "default");
// Suspend checks implementation may change in the future, and this test relies
// on how instructions are ordered.
RemoveSuspendChecks(graph);
diff --git a/compiler/optimizing/liveness_test.cc b/compiler/optimizing/liveness_test.cc
index f11f7a9..ba3787e 100644
--- a/compiler/optimizing/liveness_test.cc
+++ b/compiler/optimizing/liveness_test.cc
@@ -47,8 +47,10 @@
void LivenessTest::TestCode(const std::vector<uint16_t>& data, const char* expected) {
HGraph* graph = CreateCFG(data);
// `Inline` conditions into ifs.
- PrepareForRegisterAllocation(graph, *compiler_options_).Run();
- std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options_);
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(kRuntimeISA, "default");
+ PrepareForRegisterAllocation(graph, *compiler_options).Run();
+ std::unique_ptr<CodeGenerator> codegen = CodeGenerator::Create(graph, *compiler_options);
SsaLivenessAnalysis liveness(graph, codegen.get(), GetScopedAllocator());
liveness.Analyze();
diff --git a/compiler/optimizing/loop_optimization_test.cc b/compiler/optimizing/loop_optimization_test.cc
index 8b4d58e..bda2528 100644
--- a/compiler/optimizing/loop_optimization_test.cc
+++ b/compiler/optimizing/loop_optimization_test.cc
@@ -15,6 +15,7 @@
*/
#include "code_generator.h"
+#include "driver/compiler_options.h"
#include "loop_optimization.h"
#include "optimizing_unit_test.h"
@@ -28,12 +29,12 @@
class LoopOptimizationTest : public OptimizingUnitTest {
protected:
void SetUp() override {
- OverrideInstructionSetFeatures(instruction_set_, "default");
OptimizingUnitTest::SetUp();
graph_ = CreateGraph();
BuildGraph();
iva_ = new (GetAllocator()) HInductionVarAnalysis(graph_);
+ compiler_options_ = CommonCompilerTest::CreateCompilerOptions(kRuntimeISA, "default");
DCHECK(compiler_options_ != nullptr);
codegen_ = CodeGenerator::Create(graph_, *compiler_options_);
DCHECK(codegen_.get() != nullptr);
@@ -43,6 +44,7 @@
void TearDown() override {
codegen_.reset();
+ compiler_options_.reset();
graph_ = nullptr;
ResetPoolAndAllocator();
OptimizingUnitTest::TearDown();
@@ -117,6 +119,7 @@
// General building fields.
HGraph* graph_;
+ std::unique_ptr<CompilerOptions> compiler_options_;
std::unique_ptr<CodeGenerator> codegen_;
HInductionVarAnalysis* iva_;
HLoopOptimization* loop_opt_;
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 61e1680..c884f5b 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -266,7 +266,7 @@
std::vector<HInstruction*> parameters_;
};
-class OptimizingUnitTest : public CommonCompilerTest, public OptimizingUnitTestHelper {};
+class OptimizingUnitTest : public CommonArtTest, public OptimizingUnitTestHelper {};
// Naive string diff data type.
typedef std::list<std::pair<std::string, std::string>> diff_t;
diff --git a/compiler/optimizing/reference_type_propagation.cc b/compiler/optimizing/reference_type_propagation.cc
index ff1e01b..8576943 100644
--- a/compiler/optimizing/reference_type_propagation.cc
+++ b/compiler/optimizing/reference_type_propagation.cc
@@ -126,8 +126,8 @@
}
void ReferenceTypePropagation::ValidateTypes() {
- // TODO: move this to the graph checker.
- if (kIsDebugBuild) {
+ // TODO: move this to the graph checker. Note: There may be no Thread for gtests.
+ if (kIsDebugBuild && Thread::Current() != nullptr) {
ScopedObjectAccess soa(Thread::Current());
for (HBasicBlock* block : graph_->GetReversePostOrder()) {
for (HInstructionIterator iti(block->GetInstructions()); !iti.Done(); iti.Advance()) {
diff --git a/compiler/optimizing/reference_type_propagation_test.cc b/compiler/optimizing/reference_type_propagation_test.cc
index 01f0dd3..a0d6609 100644
--- a/compiler/optimizing/reference_type_propagation_test.cc
+++ b/compiler/optimizing/reference_type_propagation_test.cc
@@ -28,7 +28,7 @@
* Fixture class for unit testing the ReferenceTypePropagation phase. Used to verify the
* functionality of methods and situations that are hard to set up with checker tests.
*/
-class ReferenceTypePropagationTest : public OptimizingUnitTest {
+class ReferenceTypePropagationTest : public CommonCompilerTest, public OptimizingUnitTestHelper {
public:
ReferenceTypePropagationTest() : graph_(nullptr), propagation_(nullptr) { }
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 79eb082..d1db40b 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -41,9 +41,9 @@
class RegisterAllocatorTest : public OptimizingUnitTest {
protected:
void SetUp() override {
- // This test is using the x86 ISA.
- OverrideInstructionSetFeatures(InstructionSet::kX86, "default");
OptimizingUnitTest::SetUp();
+ // This test is using the x86 ISA.
+ compiler_options_ = CommonCompilerTest::CreateCompilerOptions(InstructionSet::kX86, "default");
}
// These functions need to access private variables of LocationSummary, so we declare it
@@ -74,6 +74,8 @@
/* processing_core_registers= */ true,
/* log_fatal_on_failure= */ false);
}
+
+ std::unique_ptr<CompilerOptions> compiler_options_;
};
// This macro should include all register allocation strategies that should be tested.
diff --git a/compiler/optimizing/scheduler_test.cc b/compiler/optimizing/scheduler_test.cc
index 7835b1d..b5ec93e 100644
--- a/compiler/optimizing/scheduler_test.cc
+++ b/compiler/optimizing/scheduler_test.cc
@@ -188,9 +188,10 @@
HInstructionScheduling scheduling(graph, target_config.GetInstructionSet());
scheduling.Run(/*only_optimize_loop_blocks*/ false, /*schedule_randomly*/ true);
- OverrideInstructionSetFeatures(target_config.GetInstructionSet(), "default");
+ std::unique_ptr<CompilerOptions> compiler_options =
+ CommonCompilerTest::CreateCompilerOptions(target_config.GetInstructionSet(), "default");
RunCode(target_config,
- *compiler_options_,
+ *compiler_options,
graph,
[](HGraph* graph_arg) { RemoveSuspendChecks(graph_arg); },
has_result, expected);
diff --git a/compiler/optimizing/ssa_liveness_analysis_test.cc b/compiler/optimizing/ssa_liveness_analysis_test.cc
index 352c44f..a477893 100644
--- a/compiler/optimizing/ssa_liveness_analysis_test.cc
+++ b/compiler/optimizing/ssa_liveness_analysis_test.cc
@@ -32,8 +32,9 @@
void SetUp() override {
OptimizingUnitTest::SetUp();
graph_ = CreateGraph();
+ compiler_options_ = CommonCompilerTest::CreateCompilerOptions(kRuntimeISA, "default");
codegen_ = CodeGenerator::Create(graph_, *compiler_options_);
- CHECK(codegen_ != nullptr) << instruction_set_ << " is not a supported target architecture.";
+ CHECK(codegen_ != nullptr);
// Create entry block.
entry_ = new (GetAllocator()) HBasicBlock(graph_);
graph_->AddBlock(entry_);
@@ -50,6 +51,7 @@
}
HGraph* graph_;
+ std::unique_ptr<CompilerOptions> compiler_options_;
std::unique_ptr<CodeGenerator> codegen_;
HBasicBlock* entry_;
};