ART: Remove Baseline compiler
We don't need Baseline any more and it hasn't been maintained for a
while anyway. Let's remove it, together with the local register
allocation, the mapping/GC-map/vmap table builders, and the other code
paths that only Baseline used.
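
For reference, the code generator API collapses into a single entry point.
A minimal, hypothetical caller-side sketch (the `codegen` and `allocator`
names are illustrative and not part of this patch):

    // Before: pick an entry point depending on the compilation mode.
    //   codegen->CompileBaseline(&allocator, /* is_leaf */ true);
    //   codegen->CompileOptimized(&allocator);
    // After: a single entry point; the register allocator must already have
    // run and called InitializeCodeGeneration() to compute the frame size.
    codegen->Compile(&allocator);

SetupBlockedRegisters() likewise loses its is_baseline parameter in all
backends.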
Change-Id: I442ed26855527be2df3c79935403a25b1ee55df6
diff --git a/compiler/optimizing/builder.h b/compiler/optimizing/builder.h
index 26bf1cb..1d604e7 100644
--- a/compiler/optimizing/builder.h
+++ b/compiler/optimizing/builder.h
@@ -56,7 +56,6 @@
return_type_(Primitive::GetType(dex_compilation_unit_->GetShorty()[0])),
code_start_(nullptr),
latest_result_(nullptr),
- can_use_baseline_for_string_init_(true),
compilation_stats_(compiler_stats),
interpreter_metadata_(interpreter_metadata),
dex_cache_(dex_cache) {}
@@ -77,7 +76,6 @@
return_type_(return_type),
code_start_(nullptr),
latest_result_(nullptr),
- can_use_baseline_for_string_init_(true),
compilation_stats_(nullptr),
interpreter_metadata_(nullptr),
null_dex_cache_(),
@@ -85,10 +83,6 @@
bool BuildGraph(const DexFile::CodeItem& code);
- bool CanUseBaselineForStringInit() const {
- return can_use_baseline_for_string_init_;
- }
-
static constexpr const char* kBuilderPassName = "builder";
// The number of entries in a packed switch before we use a jump table or specified
@@ -363,11 +357,6 @@
// used by move-result instructions.
HInstruction* latest_result_;
- // We need to know whether we have built a graph that has calls to StringFactory
- // and hasn't gone through the verifier. If the following flag is `false`, then
- // we cannot compile with baseline.
- bool can_use_baseline_for_string_init_;
-
OptimizingCompilerStats* compilation_stats_;
const uint8_t* interpreter_metadata_;
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index ea0b9ec..a3bbfdb 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -142,23 +142,6 @@
return pointer_size * index;
}
-void CodeGenerator::CompileBaseline(CodeAllocator* allocator, bool is_leaf) {
- Initialize();
- if (!is_leaf) {
- MarkNotLeaf();
- }
- const bool is_64_bit = Is64BitInstructionSet(GetInstructionSet());
- InitializeCodeGeneration(GetGraph()->GetNumberOfLocalVRegs()
- + GetGraph()->GetTemporariesVRegSlots()
- + 1 /* filler */,
- 0, /* the baseline compiler does not have live registers at slow path */
- 0, /* the baseline compiler does not have live registers at slow path */
- GetGraph()->GetMaximumNumberOfOutVRegs()
- + (is_64_bit ? 2 : 1) /* current method */,
- GetGraph()->GetBlocks());
- CompileInternal(allocator, /* is_baseline */ true);
-}
-
bool CodeGenerator::GoesToNextBlock(HBasicBlock* current, HBasicBlock* next) const {
DCHECK_EQ((*block_order_)[current_block_index_], current);
return GetNextBlockToEmit() == FirstNonEmptyBlock(next);
@@ -220,8 +203,12 @@
current_slow_path_ = nullptr;
}
-void CodeGenerator::CompileInternal(CodeAllocator* allocator, bool is_baseline) {
- is_baseline_ = is_baseline;
+void CodeGenerator::Compile(CodeAllocator* allocator) {
+ // The register allocator already called `InitializeCodeGeneration`,
+ // where the frame size has been computed.
+ DCHECK(block_order_ != nullptr);
+ Initialize();
+
HGraphVisitor* instruction_visitor = GetInstructionVisitor();
DCHECK_EQ(current_block_index_, 0u);
@@ -242,9 +229,6 @@
for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
HInstruction* current = it.Current();
DisassemblyScope disassembly_scope(current, *this);
- if (is_baseline) {
- InitLocationsBaseline(current);
- }
DCHECK(CheckTypeConsistency(current));
current->Accept(instruction_visitor);
}
@@ -254,7 +238,7 @@
// Emit catch stack maps at the end of the stack map stream as expected by the
// runtime exception handler.
- if (!is_baseline && graph_->HasTryCatch()) {
+ if (graph_->HasTryCatch()) {
RecordCatchBlockInfo();
}
@@ -262,14 +246,6 @@
Finalize(allocator);
}
-void CodeGenerator::CompileOptimized(CodeAllocator* allocator) {
- // The register allocator already called `InitializeCodeGeneration`,
- // where the frame size has been computed.
- DCHECK(block_order_ != nullptr);
- Initialize();
- CompileInternal(allocator, /* is_baseline */ false);
-}
-
void CodeGenerator::Finalize(CodeAllocator* allocator) {
size_t code_size = GetAssembler()->CodeSize();
uint8_t* buffer = allocator->Allocate(code_size);
@@ -282,29 +258,6 @@
// No linker patches by default.
}
-size_t CodeGenerator::FindFreeEntry(bool* array, size_t length) {
- for (size_t i = 0; i < length; ++i) {
- if (!array[i]) {
- array[i] = true;
- return i;
- }
- }
- LOG(FATAL) << "Could not find a register in baseline register allocator";
- UNREACHABLE();
-}
-
-size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length) {
- for (size_t i = 0; i < length - 1; i += 2) {
- if (!array[i] && !array[i + 1]) {
- array[i] = true;
- array[i + 1] = true;
- return i;
- }
- }
- LOG(FATAL) << "Could not find a register in baseline register allocator";
- UNREACHABLE();
-}
-
void CodeGenerator::InitializeCodeGeneration(size_t number_of_spill_slots,
size_t maximum_number_of_live_core_registers,
size_t maximum_number_of_live_fpu_registers,
@@ -592,123 +545,6 @@
}
}
-void CodeGenerator::AllocateRegistersLocally(HInstruction* instruction) const {
- LocationSummary* locations = instruction->GetLocations();
- if (locations == nullptr) return;
-
- for (size_t i = 0, e = GetNumberOfCoreRegisters(); i < e; ++i) {
- blocked_core_registers_[i] = false;
- }
-
- for (size_t i = 0, e = GetNumberOfFloatingPointRegisters(); i < e; ++i) {
- blocked_fpu_registers_[i] = false;
- }
-
- for (size_t i = 0, e = number_of_register_pairs_; i < e; ++i) {
- blocked_register_pairs_[i] = false;
- }
-
- // Mark all fixed input, temp and output registers as used.
- for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
- BlockIfInRegister(locations->InAt(i));
- }
-
- for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
- Location loc = locations->GetTemp(i);
- BlockIfInRegister(loc);
- }
- Location result_location = locations->Out();
- if (locations->OutputCanOverlapWithInputs()) {
- BlockIfInRegister(result_location, /* is_out */ true);
- }
-
- SetupBlockedRegisters(/* is_baseline */ true);
-
- // Allocate all unallocated input locations.
- for (size_t i = 0, e = locations->GetInputCount(); i < e; ++i) {
- Location loc = locations->InAt(i);
- HInstruction* input = instruction->InputAt(i);
- if (loc.IsUnallocated()) {
- if ((loc.GetPolicy() == Location::kRequiresRegister)
- || (loc.GetPolicy() == Location::kRequiresFpuRegister)) {
- loc = AllocateFreeRegister(input->GetType());
- } else {
- DCHECK_EQ(loc.GetPolicy(), Location::kAny);
- HLoadLocal* load = input->AsLoadLocal();
- if (load != nullptr) {
- loc = GetStackLocation(load);
- } else {
- loc = AllocateFreeRegister(input->GetType());
- }
- }
- locations->SetInAt(i, loc);
- }
- }
-
- // Allocate all unallocated temp locations.
- for (size_t i = 0, e = locations->GetTempCount(); i < e; ++i) {
- Location loc = locations->GetTemp(i);
- if (loc.IsUnallocated()) {
- switch (loc.GetPolicy()) {
- case Location::kRequiresRegister:
- // Allocate a core register (large enough to fit a 32-bit integer).
- loc = AllocateFreeRegister(Primitive::kPrimInt);
- break;
-
- case Location::kRequiresFpuRegister:
- // Allocate a core register (large enough to fit a 64-bit double).
- loc = AllocateFreeRegister(Primitive::kPrimDouble);
- break;
-
- default:
- LOG(FATAL) << "Unexpected policy for temporary location "
- << loc.GetPolicy();
- }
- locations->SetTempAt(i, loc);
- }
- }
- if (result_location.IsUnallocated()) {
- switch (result_location.GetPolicy()) {
- case Location::kAny:
- case Location::kRequiresRegister:
- case Location::kRequiresFpuRegister:
- result_location = AllocateFreeRegister(instruction->GetType());
- break;
- case Location::kSameAsFirstInput:
- result_location = locations->InAt(0);
- break;
- }
- locations->UpdateOut(result_location);
- }
-}
-
-void CodeGenerator::InitLocationsBaseline(HInstruction* instruction) {
- AllocateLocations(instruction);
- if (instruction->GetLocations() == nullptr) {
- if (instruction->IsTemporary()) {
- HInstruction* previous = instruction->GetPrevious();
- Location temp_location = GetTemporaryLocation(instruction->AsTemporary());
- Move(previous, temp_location, instruction);
- }
- return;
- }
- AllocateRegistersLocally(instruction);
- for (size_t i = 0, e = instruction->InputCount(); i < e; ++i) {
- Location location = instruction->GetLocations()->InAt(i);
- HInstruction* input = instruction->InputAt(i);
- if (location.IsValid()) {
- // Move the input to the desired location.
- if (input->GetNext()->IsTemporary()) {
- // If the input was stored in a temporary, use that temporary to
- // perform the move.
- Move(input->GetNext(), location, instruction);
- } else {
- Move(input, location, instruction);
- }
- }
- }
-}
-
void CodeGenerator::AllocateLocations(HInstruction* instruction) {
instruction->Accept(GetLocationBuilder());
DCHECK(CheckTypeConsistency(instruction));
@@ -789,132 +625,6 @@
}
}
-void CodeGenerator::BuildNativeGCMap(
- ArenaVector<uint8_t>* data, const CompilerDriver& compiler_driver) const {
- const std::vector<uint8_t>& gc_map_raw =
- compiler_driver.GetVerifiedMethod(&GetGraph()->GetDexFile(), GetGraph()->GetMethodIdx())
- ->GetDexGcMap();
- verifier::DexPcToReferenceMap dex_gc_map(&(gc_map_raw)[0]);
-
- uint32_t max_native_offset = stack_map_stream_.ComputeMaxNativePcOffset();
-
- size_t num_stack_maps = stack_map_stream_.GetNumberOfStackMaps();
- GcMapBuilder builder(data, num_stack_maps, max_native_offset, dex_gc_map.RegWidth());
- for (size_t i = 0; i != num_stack_maps; ++i) {
- const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
- uint32_t native_offset = stack_map_entry.native_pc_offset;
- uint32_t dex_pc = stack_map_entry.dex_pc;
- const uint8_t* references = dex_gc_map.FindBitMap(dex_pc, false);
- CHECK(references != nullptr) << "Missing ref for dex pc 0x" << std::hex << dex_pc;
- builder.AddEntry(native_offset, references);
- }
-}
-
-void CodeGenerator::BuildMappingTable(ArenaVector<uint8_t>* data) const {
- uint32_t pc2dex_data_size = 0u;
- uint32_t pc2dex_entries = stack_map_stream_.GetNumberOfStackMaps();
- uint32_t pc2dex_offset = 0u;
- int32_t pc2dex_dalvik_offset = 0;
- uint32_t dex2pc_data_size = 0u;
- uint32_t dex2pc_entries = 0u;
- uint32_t dex2pc_offset = 0u;
- int32_t dex2pc_dalvik_offset = 0;
-
- for (size_t i = 0; i < pc2dex_entries; i++) {
- const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
- pc2dex_data_size += UnsignedLeb128Size(stack_map_entry.native_pc_offset - pc2dex_offset);
- pc2dex_data_size += SignedLeb128Size(stack_map_entry.dex_pc - pc2dex_dalvik_offset);
- pc2dex_offset = stack_map_entry.native_pc_offset;
- pc2dex_dalvik_offset = stack_map_entry.dex_pc;
- }
-
- // Walk over the blocks and find which ones correspond to catch block entries.
- for (HBasicBlock* block : graph_->GetBlocks()) {
- if (block->IsCatchBlock()) {
- intptr_t native_pc = GetAddressOf(block);
- ++dex2pc_entries;
- dex2pc_data_size += UnsignedLeb128Size(native_pc - dex2pc_offset);
- dex2pc_data_size += SignedLeb128Size(block->GetDexPc() - dex2pc_dalvik_offset);
- dex2pc_offset = native_pc;
- dex2pc_dalvik_offset = block->GetDexPc();
- }
- }
-
- uint32_t total_entries = pc2dex_entries + dex2pc_entries;
- uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
- uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
- data->resize(data_size);
-
- uint8_t* data_ptr = &(*data)[0];
- uint8_t* write_pos = data_ptr;
-
- write_pos = EncodeUnsignedLeb128(write_pos, total_entries);
- write_pos = EncodeUnsignedLeb128(write_pos, pc2dex_entries);
- DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size);
- uint8_t* write_pos2 = write_pos + pc2dex_data_size;
-
- pc2dex_offset = 0u;
- pc2dex_dalvik_offset = 0u;
- dex2pc_offset = 0u;
- dex2pc_dalvik_offset = 0u;
-
- for (size_t i = 0; i < pc2dex_entries; i++) {
- const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
- DCHECK(pc2dex_offset <= stack_map_entry.native_pc_offset);
- write_pos = EncodeUnsignedLeb128(write_pos, stack_map_entry.native_pc_offset - pc2dex_offset);
- write_pos = EncodeSignedLeb128(write_pos, stack_map_entry.dex_pc - pc2dex_dalvik_offset);
- pc2dex_offset = stack_map_entry.native_pc_offset;
- pc2dex_dalvik_offset = stack_map_entry.dex_pc;
- }
-
- for (HBasicBlock* block : graph_->GetBlocks()) {
- if (block->IsCatchBlock()) {
- intptr_t native_pc = GetAddressOf(block);
- write_pos2 = EncodeUnsignedLeb128(write_pos2, native_pc - dex2pc_offset);
- write_pos2 = EncodeSignedLeb128(write_pos2, block->GetDexPc() - dex2pc_dalvik_offset);
- dex2pc_offset = native_pc;
- dex2pc_dalvik_offset = block->GetDexPc();
- }
- }
-
-
- DCHECK_EQ(static_cast<size_t>(write_pos - data_ptr), hdr_data_size + pc2dex_data_size);
- DCHECK_EQ(static_cast<size_t>(write_pos2 - data_ptr), data_size);
-
- if (kIsDebugBuild) {
- // Verify the encoded table holds the expected data.
- MappingTable table(data_ptr);
- CHECK_EQ(table.TotalSize(), total_entries);
- CHECK_EQ(table.PcToDexSize(), pc2dex_entries);
- auto it = table.PcToDexBegin();
- auto it2 = table.DexToPcBegin();
- for (size_t i = 0; i < pc2dex_entries; i++) {
- const StackMapStream::StackMapEntry& stack_map_entry = stack_map_stream_.GetStackMap(i);
- CHECK_EQ(stack_map_entry.native_pc_offset, it.NativePcOffset());
- CHECK_EQ(stack_map_entry.dex_pc, it.DexPc());
- ++it;
- }
- for (HBasicBlock* block : graph_->GetBlocks()) {
- if (block->IsCatchBlock()) {
- CHECK_EQ(GetAddressOf(block), it2.NativePcOffset());
- CHECK_EQ(block->GetDexPc(), it2.DexPc());
- ++it2;
- }
- }
- CHECK(it == table.PcToDexEnd());
- CHECK(it2 == table.DexToPcEnd());
- }
-}
-
-void CodeGenerator::BuildVMapTable(ArenaVector<uint8_t>* data) const {
- Leb128Encoder<ArenaVector<uint8_t>> vmap_encoder(data);
- // We currently don't use callee-saved registers.
- size_t size = 0 + 1 /* marker */ + 0;
- vmap_encoder.Reserve(size + 1u); // All values are likely to be one byte in ULEB128 (<128).
- vmap_encoder.PushBackUnsigned(size);
- vmap_encoder.PushBackUnsigned(VmapTable::kAdjustedFpMarker);
-}
-
size_t CodeGenerator::ComputeStackMapsSize() {
return stack_map_stream_.PrepareForFillIn();
}
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 5958cd8..4f8f146 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -158,10 +158,8 @@
class CodeGenerator {
public:
- // Compiles the graph to executable instructions. Returns whether the compilation
- // succeeded.
- void CompileBaseline(CodeAllocator* allocator, bool is_leaf = false);
- void CompileOptimized(CodeAllocator* allocator);
+ // Compiles the graph to executable instructions.
+ void Compile(CodeAllocator* allocator);
static CodeGenerator* Create(HGraph* graph,
InstructionSet instruction_set,
const InstructionSetFeatures& isa_features,
@@ -214,7 +212,7 @@
size_t GetNumberOfCoreRegisters() const { return number_of_core_registers_; }
size_t GetNumberOfFloatingPointRegisters() const { return number_of_fpu_registers_; }
- virtual void SetupBlockedRegisters(bool is_baseline) const = 0;
+ virtual void SetupBlockedRegisters() const = 0;
virtual void ComputeSpillMask() {
core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
@@ -290,17 +288,9 @@
slow_paths_.push_back(slow_path);
}
- void BuildMappingTable(ArenaVector<uint8_t>* vector) const;
- void BuildVMapTable(ArenaVector<uint8_t>* vector) const;
- void BuildNativeGCMap(
- ArenaVector<uint8_t>* vector, const CompilerDriver& compiler_driver) const;
void BuildStackMaps(MemoryRegion region);
size_t ComputeStackMapsSize();
- bool IsBaseline() const {
- return is_baseline_;
- }
-
bool IsLeafMethod() const {
return is_leaf_;
}
@@ -489,7 +479,6 @@
fpu_callee_save_mask_(fpu_callee_save_mask),
stack_map_stream_(graph->GetArena()),
block_order_(nullptr),
- is_baseline_(false),
disasm_info_(nullptr),
stats_(stats),
graph_(graph),
@@ -502,15 +491,6 @@
slow_paths_.reserve(8);
}
- // Register allocation logic.
- void AllocateRegistersLocally(HInstruction* instruction) const;
-
- // Backend specific implementation for allocating a register.
- virtual Location AllocateFreeRegister(Primitive::Type type) const = 0;
-
- static size_t FindFreeEntry(bool* array, size_t length);
- static size_t FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length);
-
virtual Location GetStackLocation(HLoadLocal* load) const = 0;
virtual HGraphVisitor* GetLocationBuilder() = 0;
@@ -593,16 +573,11 @@
// The order to use for code generation.
const ArenaVector<HBasicBlock*>* block_order_;
- // Whether we are using baseline.
- bool is_baseline_;
-
DisassemblyInformation* disasm_info_;
private:
- void InitLocationsBaseline(HInstruction* instruction);
size_t GetStackOffsetOfSavedRegister(size_t index);
void GenerateSlowPaths();
- void CompileInternal(CodeAllocator* allocator, bool is_baseline);
void BlockIfInRegister(Location location, bool is_out = false) const;
void EmitEnvironment(HEnvironment* environment, SlowPathCode* slow_path);
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index a11ceb9..cdee355 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -47,9 +47,7 @@
static constexpr int kCurrentMethodStackOffset = 0;
static constexpr Register kMethodRegisterArgument = R0;
-// We unconditionally allocate R5 to ensure we can do long operations
-// with baseline.
-static constexpr Register kCoreSavedRegisterForBaseline = R5;
+static constexpr Register kCoreAlwaysSpillRegister = R5;
static constexpr Register kCoreCalleeSaves[] =
{ R5, R6, R7, R8, R10, R11, LR };
static constexpr SRegister kFpuCalleeSaves[] =
@@ -815,58 +813,7 @@
CodeGenerator::Finalize(allocator);
}
-Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
- switch (type) {
- case Primitive::kPrimLong: {
- size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
- ArmManagedRegister pair =
- ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
- DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
- DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);
-
- blocked_core_registers_[pair.AsRegisterPairLow()] = true;
- blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
- UpdateBlockedPairRegisters();
- return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
- }
-
- case Primitive::kPrimByte:
- case Primitive::kPrimBoolean:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
- int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
- // Block all register pairs that contain `reg`.
- for (int i = 0; i < kNumberOfRegisterPairs; i++) {
- ArmManagedRegister current =
- ArmManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
- if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
- blocked_register_pairs_[i] = true;
- }
- }
- return Location::RegisterLocation(reg);
- }
-
- case Primitive::kPrimFloat: {
- int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfSRegisters);
- return Location::FpuRegisterLocation(reg);
- }
-
- case Primitive::kPrimDouble: {
- int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
- DCHECK_EQ(reg % 2, 0);
- return Location::FpuRegisterPairLocation(reg, reg + 1);
- }
-
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable type " << type;
- }
-
- return Location::NoLocation();
-}
-
-void CodeGeneratorARM::SetupBlockedRegisters(bool is_baseline) const {
+void CodeGeneratorARM::SetupBlockedRegisters() const {
// Don't allocate the dalvik style register pair passing.
blocked_register_pairs_[R1_R2] = true;
@@ -881,15 +828,7 @@
// Reserve temp register.
blocked_core_registers_[IP] = true;
- if (is_baseline) {
- for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
- blocked_core_registers_[kCoreCalleeSaves[i]] = true;
- }
-
- blocked_core_registers_[kCoreSavedRegisterForBaseline] = false;
- }
-
- if (is_baseline || GetGraph()->IsDebuggable()) {
+ if (GetGraph()->IsDebuggable()) {
// Stubs do not save callee-save floating point registers. If the graph
// is debuggable, we need to deal with these registers differently. For
// now, just block them.
@@ -919,11 +858,10 @@
void CodeGeneratorARM::ComputeSpillMask() {
core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
- // Save one extra register for baseline. Note that on thumb2, there is no easy
- // instruction to restore just the PC, so this actually helps both baseline
- // and non-baseline to save and restore at least two registers at entry and exit.
- core_spill_mask_ |= (1 << kCoreSavedRegisterForBaseline);
DCHECK_NE(core_spill_mask_, 0u) << "At least the return address register must be saved";
+ // There is no easy instruction to restore just the PC on thumb2. We spill and
+ // restore another arbitrary register.
+ core_spill_mask_ |= (1 << kCoreAlwaysSpillRegister);
fpu_spill_mask_ = allocated_registers_.GetFloatingPointRegisters() & fpu_callee_save_mask_;
// We use vpush and vpop for saving and restoring floating point registers, which take
// a SRegister and the number of registers to save/restore after that SRegister. We
@@ -1972,9 +1910,9 @@
}
void LocationsBuilderARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
IntrinsicLocationsBuilderARM intrinsic(GetGraph()->GetArena(),
codegen_->GetAssembler(),
@@ -2004,9 +1942,9 @@
}
void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 26d6d63..1183dda 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -340,9 +340,7 @@
return GetLabelOf(block)->Position();
}
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
-
- Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 6ed2c5a..a874fba 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1094,7 +1094,7 @@
}
}
-void CodeGeneratorARM64::SetupBlockedRegisters(bool is_baseline) const {
+void CodeGeneratorARM64::SetupBlockedRegisters() const {
// Blocked core registers:
// lr : Runtime reserved.
// tr : Runtime reserved.
@@ -1115,40 +1115,17 @@
blocked_fpu_registers_[reserved_fp_registers.PopLowestIndex().code()] = true;
}
- if (is_baseline) {
- CPURegList reserved_core_baseline_registers = callee_saved_core_registers;
- while (!reserved_core_baseline_registers.IsEmpty()) {
- blocked_core_registers_[reserved_core_baseline_registers.PopLowestIndex().code()] = true;
- }
- }
-
- if (is_baseline || GetGraph()->IsDebuggable()) {
+ if (GetGraph()->IsDebuggable()) {
// Stubs do not save callee-save floating point registers. If the graph
// is debuggable, we need to deal with these registers differently. For
// now, just block them.
- CPURegList reserved_fp_baseline_registers = callee_saved_fp_registers;
- while (!reserved_fp_baseline_registers.IsEmpty()) {
- blocked_fpu_registers_[reserved_fp_baseline_registers.PopLowestIndex().code()] = true;
+ CPURegList reserved_fp_registers_debuggable = callee_saved_fp_registers;
+ while (!reserved_fp_registers_debuggable.IsEmpty()) {
+ blocked_fpu_registers_[reserved_fp_registers_debuggable.PopLowestIndex().code()] = true;
}
}
}
-Location CodeGeneratorARM64::AllocateFreeRegister(Primitive::Type type) const {
- if (type == Primitive::kPrimVoid) {
- LOG(FATAL) << "Unreachable type " << type;
- }
-
- if (Primitive::IsFloatingPointType(type)) {
- ssize_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfAllocatableFPRegisters);
- DCHECK_NE(reg, -1);
- return Location::FpuRegisterLocation(reg);
- } else {
- ssize_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfAllocatableRegisters);
- DCHECK_NE(reg, -1);
- return Location::RegisterLocation(reg);
- }
-}
-
size_t CodeGeneratorARM64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
Register reg = Register(VIXLRegCodeFromART(reg_id), kXRegSize);
__ Str(reg, MemOperand(sp, stack_index));
@@ -3462,9 +3439,9 @@
}
void LocationsBuilderARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
IntrinsicLocationsBuilderARM64 intrinsic(GetGraph()->GetArena());
if (intrinsic.TryDispatch(invoke)) {
@@ -3712,9 +3689,9 @@
void InstructionCodeGeneratorARM64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index f2ff894..8eb9fcc 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -339,10 +339,7 @@
// Register allocation.
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
- // AllocateFreeRegister() is only used when allocating registers locally
- // during CompileBaseline().
- Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc
index e34767c..5bd136a 100644
--- a/compiler/optimizing/code_generator_mips.cc
+++ b/compiler/optimizing/code_generator_mips.cc
@@ -1042,7 +1042,7 @@
__ Bind(&done);
}
-void CodeGeneratorMIPS::SetupBlockedRegisters(bool is_baseline) const {
+void CodeGeneratorMIPS::SetupBlockedRegisters() const {
// Don't allocate the dalvik style register pair passing.
blocked_register_pairs_[A1_A2] = true;
@@ -1072,16 +1072,6 @@
blocked_fpu_registers_[i] = true;
}
- if (is_baseline) {
- for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
- blocked_core_registers_[kCoreCalleeSaves[i]] = true;
- }
-
- for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
- blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
- }
- }
-
UpdateBlockedPairRegisters();
}
@@ -1096,52 +1086,6 @@
}
}
-Location CodeGeneratorMIPS::AllocateFreeRegister(Primitive::Type type) const {
- switch (type) {
- case Primitive::kPrimLong: {
- size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
- MipsManagedRegister pair =
- MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
- DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
- DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);
-
- blocked_core_registers_[pair.AsRegisterPairLow()] = true;
- blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
- UpdateBlockedPairRegisters();
- return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
- }
-
- case Primitive::kPrimByte:
- case Primitive::kPrimBoolean:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
- int reg = FindFreeEntry(blocked_core_registers_, kNumberOfCoreRegisters);
- // Block all register pairs that contain `reg`.
- for (int i = 0; i < kNumberOfRegisterPairs; i++) {
- MipsManagedRegister current =
- MipsManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
- if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
- blocked_register_pairs_[i] = true;
- }
- }
- return Location::RegisterLocation(reg);
- }
-
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble: {
- int reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFRegisters);
- return Location::FpuRegisterLocation(reg);
- }
-
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable type " << type;
- }
-
- UNREACHABLE();
-}
-
size_t CodeGeneratorMIPS::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
__ StoreToOffset(kStoreWord, Register(reg_id), SP, stack_index);
return kMipsWordSize;
@@ -3835,9 +3779,9 @@
}
void LocationsBuilderMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
IntrinsicLocationsBuilderMIPS intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
@@ -3973,9 +3917,9 @@
}
void InstructionCodeGeneratorMIPS::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index c3d4851..2cde0ed 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -290,10 +290,7 @@
// Register allocation.
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
- // AllocateFreeRegister() is only used when allocating registers locally
- // during CompileBaseline().
- Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 79cd56d..0505486 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -979,7 +979,7 @@
__ Bind(&done);
}
-void CodeGeneratorMIPS64::SetupBlockedRegisters(bool is_baseline ATTRIBUTE_UNUSED) const {
+void CodeGeneratorMIPS64::SetupBlockedRegisters() const {
// ZERO, K0, K1, GP, SP, RA are always reserved and can't be allocated.
blocked_core_registers_[ZERO] = true;
blocked_core_registers_[K0] = true;
@@ -1003,8 +1003,7 @@
// TODO: review; anything else?
- // TODO: make these two for's conditional on is_baseline once
- // all the issues with register saving/restoring are sorted out.
+ // TODO: remove once all the issues with register saving/restoring are sorted out.
for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
blocked_core_registers_[kCoreCalleeSaves[i]] = true;
}
@@ -1014,20 +1013,6 @@
}
}
-Location CodeGeneratorMIPS64::AllocateFreeRegister(Primitive::Type type) const {
- if (type == Primitive::kPrimVoid) {
- LOG(FATAL) << "Unreachable type " << type;
- }
-
- if (Primitive::IsFloatingPointType(type)) {
- size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFpuRegisters);
- return Location::FpuRegisterLocation(reg);
- } else {
- size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfGpuRegisters);
- return Location::RegisterLocation(reg);
- }
-}
-
size_t CodeGeneratorMIPS64::SaveCoreRegister(size_t stack_index, uint32_t reg_id) {
__ StoreToOffset(kStoreDoubleword, GpuRegister(reg_id), SP, stack_index);
return kMips64WordSize;
@@ -3031,9 +3016,9 @@
}
void LocationsBuilderMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
IntrinsicLocationsBuilderMIPS64 intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
@@ -3182,9 +3167,9 @@
}
void InstructionCodeGeneratorMIPS64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 7182e8e..140ff95 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -289,10 +289,7 @@
// Register allocation.
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
- // AllocateFreeRegister() is only used when allocating registers locally
- // during CompileBaseline().
- Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 6259acd..f95cb30 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -817,65 +817,13 @@
AddAllocatedRegister(Location::RegisterLocation(kFakeReturnRegister));
}
-Location CodeGeneratorX86::AllocateFreeRegister(Primitive::Type type) const {
- switch (type) {
- case Primitive::kPrimLong: {
- size_t reg = FindFreeEntry(blocked_register_pairs_, kNumberOfRegisterPairs);
- X86ManagedRegister pair =
- X86ManagedRegister::FromRegisterPair(static_cast<RegisterPair>(reg));
- DCHECK(!blocked_core_registers_[pair.AsRegisterPairLow()]);
- DCHECK(!blocked_core_registers_[pair.AsRegisterPairHigh()]);
- blocked_core_registers_[pair.AsRegisterPairLow()] = true;
- blocked_core_registers_[pair.AsRegisterPairHigh()] = true;
- UpdateBlockedPairRegisters();
- return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
- }
-
- case Primitive::kPrimByte:
- case Primitive::kPrimBoolean:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
- Register reg = static_cast<Register>(
- FindFreeEntry(blocked_core_registers_, kNumberOfCpuRegisters));
- // Block all register pairs that contain `reg`.
- for (int i = 0; i < kNumberOfRegisterPairs; i++) {
- X86ManagedRegister current =
- X86ManagedRegister::FromRegisterPair(static_cast<RegisterPair>(i));
- if (current.AsRegisterPairLow() == reg || current.AsRegisterPairHigh() == reg) {
- blocked_register_pairs_[i] = true;
- }
- }
- return Location::RegisterLocation(reg);
- }
-
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble: {
- return Location::FpuRegisterLocation(
- FindFreeEntry(blocked_fpu_registers_, kNumberOfXmmRegisters));
- }
-
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable type " << type;
- }
-
- return Location::NoLocation();
-}
-
-void CodeGeneratorX86::SetupBlockedRegisters(bool is_baseline) const {
+void CodeGeneratorX86::SetupBlockedRegisters() const {
// Don't allocate the dalvik style register pair passing.
blocked_register_pairs_[ECX_EDX] = true;
// Stack register is always reserved.
blocked_core_registers_[ESP] = true;
- if (is_baseline) {
- blocked_core_registers_[EBP] = true;
- blocked_core_registers_[ESI] = true;
- blocked_core_registers_[EDI] = true;
- }
-
UpdateBlockedPairRegisters();
}
@@ -1981,9 +1929,9 @@
}
void LocationsBuilderX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
IntrinsicLocationsBuilderX86 intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
@@ -1999,17 +1947,6 @@
if (invoke->HasPcRelativeDexCache()) {
invoke->GetLocations()->SetInAt(invoke->GetSpecialInputIndex(), Location::RequiresRegister());
}
-
- if (codegen_->IsBaseline()) {
- // Baseline does not have enough registers if the current method also
- // needs a register. We therefore do not require a register for it, and let
- // the code generation of the invoke handle it.
- LocationSummary* locations = invoke->GetLocations();
- Location location = locations->InAt(invoke->GetSpecialInputIndex());
- if (location.IsUnallocated() && location.GetPolicy() == Location::kRequiresRegister) {
- locations->SetInAt(invoke->GetSpecialInputIndex(), Location::NoLocation());
- }
- }
}
static bool TryGenerateIntrinsicCode(HInvoke* invoke, CodeGeneratorX86* codegen) {
@@ -2022,9 +1959,9 @@
}
void InstructionCodeGeneratorX86::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
@@ -4286,7 +4223,7 @@
if (current_method.IsRegister()) {
method_reg = current_method.AsRegister<Register>();
} else {
- DCHECK(IsBaseline() || invoke->GetLocations()->Intrinsified());
+ DCHECK(invoke->GetLocations()->Intrinsified());
DCHECK(!current_method.IsValid());
method_reg = reg;
__ movl(reg, Address(ESP, kCurrentMethodStackOffset));
@@ -5076,11 +5013,6 @@
}
void LocationsBuilderX86::VisitArraySet(HArraySet* instruction) {
- // This location builder might end up asking to up to four registers, which is
- // not currently possible for baseline. The situation in which we need four
- // registers cannot be met by baseline though, because it has not run any
- // optimization.
-
Primitive::Type value_type = instruction->GetComponentType();
bool needs_write_barrier =
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index c65c423..34f983f 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -359,9 +359,7 @@
return GetLabelOf(block)->Position();
}
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
-
- Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e024ce2..31f3660 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -1002,47 +1002,12 @@
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
-Location CodeGeneratorX86_64::AllocateFreeRegister(Primitive::Type type) const {
- switch (type) {
- case Primitive::kPrimLong:
- case Primitive::kPrimByte:
- case Primitive::kPrimBoolean:
- case Primitive::kPrimChar:
- case Primitive::kPrimShort:
- case Primitive::kPrimInt:
- case Primitive::kPrimNot: {
- size_t reg = FindFreeEntry(blocked_core_registers_, kNumberOfCpuRegisters);
- return Location::RegisterLocation(reg);
- }
-
- case Primitive::kPrimFloat:
- case Primitive::kPrimDouble: {
- size_t reg = FindFreeEntry(blocked_fpu_registers_, kNumberOfFloatRegisters);
- return Location::FpuRegisterLocation(reg);
- }
-
- case Primitive::kPrimVoid:
- LOG(FATAL) << "Unreachable type " << type;
- }
-
- return Location::NoLocation();
-}
-
-void CodeGeneratorX86_64::SetupBlockedRegisters(bool is_baseline) const {
+void CodeGeneratorX86_64::SetupBlockedRegisters() const {
// Stack register is always reserved.
blocked_core_registers_[RSP] = true;
// Block the register used as TMP.
blocked_core_registers_[TMP] = true;
-
- if (is_baseline) {
- for (size_t i = 0; i < arraysize(kCoreCalleeSaves); ++i) {
- blocked_core_registers_[kCoreCalleeSaves[i]] = true;
- }
- for (size_t i = 0; i < arraysize(kFpuCalleeSaves); ++i) {
- blocked_fpu_registers_[kFpuCalleeSaves[i]] = true;
- }
- }
}
static dwarf::Reg DWARFReg(Register reg) {
@@ -2161,9 +2126,9 @@
}
void LocationsBuilderX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
IntrinsicLocationsBuilderX86_64 intrinsic(codegen_);
if (intrinsic.TryDispatch(invoke)) {
@@ -2183,9 +2148,9 @@
}
void InstructionCodeGeneratorX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDirect* invoke) {
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen_->IsBaseline() || !invoke->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been pruned by
+ // art::PrepareForRegisterAllocation.
+ DCHECK(!invoke->IsStaticWithExplicitClinitCheck());
if (TryGenerateIntrinsicCode(invoke, codegen_)) {
return;
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 505c9dc..9de9d9e 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -347,8 +347,7 @@
Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE;
- Location AllocateFreeRegister(Primitive::Type type) const OVERRIDE;
+ void SetupBlockedRegisters() const OVERRIDE;
void DumpCoreRegister(std::ostream& stream, int reg) const OVERRIDE;
void DumpFloatingPointRegister(std::ostream& stream, int reg) const OVERRIDE;
void Finalize(CodeAllocator* allocator) OVERRIDE;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index d970704..19d63de 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -40,6 +40,7 @@
#include "dex_file.h"
#include "dex_instruction.h"
#include "driver/compiler_options.h"
+#include "graph_checker.h"
#include "nodes.h"
#include "optimizing_unit_test.h"
#include "prepare_for_register_allocation.h"
@@ -70,8 +71,8 @@
AddAllocatedRegister(Location::RegisterLocation(arm::R7));
}
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE {
- arm::CodeGeneratorARM::SetupBlockedRegisters(is_baseline);
+ void SetupBlockedRegisters() const OVERRIDE {
+ arm::CodeGeneratorARM::SetupBlockedRegisters();
blocked_core_registers_[arm::R4] = true;
blocked_core_registers_[arm::R6] = false;
blocked_core_registers_[arm::R7] = false;
@@ -90,8 +91,8 @@
AddAllocatedRegister(Location::RegisterLocation(x86::EDI));
}
- void SetupBlockedRegisters(bool is_baseline) const OVERRIDE {
- x86::CodeGeneratorX86::SetupBlockedRegisters(is_baseline);
+ void SetupBlockedRegisters() const OVERRIDE {
+ x86::CodeGeneratorX86::SetupBlockedRegisters();
// ebx is a callee-save register in C, but caller-save for ART.
blocked_core_registers_[x86::EBX] = true;
blocked_register_pairs_[x86::EAX_EBX] = true;
@@ -200,259 +201,228 @@
}
template <typename Expected>
-static void RunCodeBaseline(InstructionSet target_isa,
- HGraph* graph,
- bool has_result,
- Expected expected) {
- InternalCodeAllocator allocator;
+static void RunCode(CodeGenerator* codegen,
+ HGraph* graph,
+ std::function<void(HGraph*)> hook_before_codegen,
+ bool has_result,
+ Expected expected) {
+ ASSERT_TRUE(graph->IsInSsaForm());
- CompilerOptions compiler_options;
- std::unique_ptr<const X86InstructionSetFeatures> features_x86(
- X86InstructionSetFeatures::FromCppDefines());
- TestCodeGeneratorX86 codegenX86(graph, *features_x86.get(), compiler_options);
- // We avoid doing a stack overflow check that requires the runtime being setup,
- // by making sure the compiler knows the methods we are running are leaf methods.
- codegenX86.CompileBaseline(&allocator, true);
- if (target_isa == kX86) {
- Run(allocator, codegenX86, has_result, expected);
- }
+ SSAChecker graph_checker(graph);
+ graph_checker.Run();
+ ASSERT_TRUE(graph_checker.IsValid());
- std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
- ArmInstructionSetFeatures::FromCppDefines());
- TestCodeGeneratorARM codegenARM(graph, *features_arm.get(), compiler_options);
- codegenARM.CompileBaseline(&allocator, true);
- if (target_isa == kArm || target_isa == kThumb2) {
- Run(allocator, codegenARM, has_result, expected);
- }
-
- std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64(
- X86_64InstructionSetFeatures::FromCppDefines());
- x86_64::CodeGeneratorX86_64 codegenX86_64(graph, *features_x86_64.get(), compiler_options);
- codegenX86_64.CompileBaseline(&allocator, true);
- if (target_isa == kX86_64) {
- Run(allocator, codegenX86_64, has_result, expected);
- }
-
- std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
- Arm64InstructionSetFeatures::FromCppDefines());
- arm64::CodeGeneratorARM64 codegenARM64(graph, *features_arm64.get(), compiler_options);
- codegenARM64.CompileBaseline(&allocator, true);
- if (target_isa == kArm64) {
- Run(allocator, codegenARM64, has_result, expected);
- }
-
- std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
- MipsInstructionSetFeatures::FromCppDefines());
- mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), compiler_options);
- codegenMIPS.CompileBaseline(&allocator, true);
- if (kRuntimeISA == kMips) {
- Run(allocator, codegenMIPS, has_result, expected);
- }
-
- std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
- Mips64InstructionSetFeatures::FromCppDefines());
- mips64::CodeGeneratorMIPS64 codegenMIPS64(graph, *features_mips64.get(), compiler_options);
- codegenMIPS64.CompileBaseline(&allocator, true);
- if (target_isa == kMips64) {
- Run(allocator, codegenMIPS64, has_result, expected);
- }
-}
-
-template <typename Expected>
-static void RunCodeOptimized(CodeGenerator* codegen,
- HGraph* graph,
- std::function<void(HGraph*)> hook_before_codegen,
- bool has_result,
- Expected expected) {
- // Tests may have already computed it.
- if (graph->GetReversePostOrder().empty()) {
- graph->BuildDominatorTree();
- }
SsaLivenessAnalysis liveness(graph, codegen);
- liveness.Analyze();
- RegisterAllocator register_allocator(graph->GetArena(), codegen, liveness);
- register_allocator.AllocateRegisters();
+ PrepareForRegisterAllocation(graph).Run();
+ liveness.Analyze();
+ RegisterAllocator(graph->GetArena(), codegen, liveness).AllocateRegisters();
hook_before_codegen(graph);
InternalCodeAllocator allocator;
- codegen->CompileOptimized(&allocator);
+ codegen->Compile(&allocator);
Run(allocator, *codegen, has_result, expected);
}
template <typename Expected>
-static void RunCodeOptimized(InstructionSet target_isa,
- HGraph* graph,
- std::function<void(HGraph*)> hook_before_codegen,
- bool has_result,
- Expected expected) {
+static void RunCode(InstructionSet target_isa,
+ HGraph* graph,
+ std::function<void(HGraph*)> hook_before_codegen,
+ bool has_result,
+ Expected expected) {
CompilerOptions compiler_options;
if (target_isa == kArm || target_isa == kThumb2) {
std::unique_ptr<const ArmInstructionSetFeatures> features_arm(
ArmInstructionSetFeatures::FromCppDefines());
TestCodeGeneratorARM codegenARM(graph, *features_arm.get(), compiler_options);
- RunCodeOptimized(&codegenARM, graph, hook_before_codegen, has_result, expected);
+ RunCode(&codegenARM, graph, hook_before_codegen, has_result, expected);
} else if (target_isa == kArm64) {
std::unique_ptr<const Arm64InstructionSetFeatures> features_arm64(
Arm64InstructionSetFeatures::FromCppDefines());
arm64::CodeGeneratorARM64 codegenARM64(graph, *features_arm64.get(), compiler_options);
- RunCodeOptimized(&codegenARM64, graph, hook_before_codegen, has_result, expected);
+ RunCode(&codegenARM64, graph, hook_before_codegen, has_result, expected);
} else if (target_isa == kX86) {
std::unique_ptr<const X86InstructionSetFeatures> features_x86(
X86InstructionSetFeatures::FromCppDefines());
x86::CodeGeneratorX86 codegenX86(graph, *features_x86.get(), compiler_options);
- RunCodeOptimized(&codegenX86, graph, hook_before_codegen, has_result, expected);
+ RunCode(&codegenX86, graph, hook_before_codegen, has_result, expected);
} else if (target_isa == kX86_64) {
std::unique_ptr<const X86_64InstructionSetFeatures> features_x86_64(
X86_64InstructionSetFeatures::FromCppDefines());
x86_64::CodeGeneratorX86_64 codegenX86_64(graph, *features_x86_64.get(), compiler_options);
- RunCodeOptimized(&codegenX86_64, graph, hook_before_codegen, has_result, expected);
+ RunCode(&codegenX86_64, graph, hook_before_codegen, has_result, expected);
} else if (target_isa == kMips) {
std::unique_ptr<const MipsInstructionSetFeatures> features_mips(
MipsInstructionSetFeatures::FromCppDefines());
mips::CodeGeneratorMIPS codegenMIPS(graph, *features_mips.get(), compiler_options);
- RunCodeOptimized(&codegenMIPS, graph, hook_before_codegen, has_result, expected);
+ RunCode(&codegenMIPS, graph, hook_before_codegen, has_result, expected);
} else if (target_isa == kMips64) {
std::unique_ptr<const Mips64InstructionSetFeatures> features_mips64(
Mips64InstructionSetFeatures::FromCppDefines());
mips64::CodeGeneratorMIPS64 codegenMIPS64(graph, *features_mips64.get(), compiler_options);
- RunCodeOptimized(&codegenMIPS64, graph, hook_before_codegen, has_result, expected);
+ RunCode(&codegenMIPS64, graph, hook_before_codegen, has_result, expected);
}
}
-static void TestCode(InstructionSet target_isa,
- const uint16_t* data,
+static ::std::vector<InstructionSet> GetTargetISAs() {
+ ::std::vector<InstructionSet> v;
+ // Add all ISAs that are executable on hardware or on simulator.
+ const ::std::vector<InstructionSet> executable_isa_candidates = {
+ kArm,
+ kArm64,
+ kThumb2,
+ kX86,
+ kX86_64,
+ kMips,
+ kMips64
+ };
+
+ for (auto target_isa : executable_isa_candidates) {
+ if (CanExecute(target_isa)) {
+ v.push_back(target_isa);
+ }
+ }
+
+ return v;
+}
+
+static void TestCode(const uint16_t* data,
bool has_result = false,
int32_t expected = 0) {
- ArenaPool pool;
- ArenaAllocator arena(&pool);
- HGraph* graph = CreateGraph(&arena);
- HGraphBuilder builder(graph);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- bool graph_built = builder.BuildGraph(*item);
- ASSERT_TRUE(graph_built);
- // Remove suspend checks, they cannot be executed in this context.
- RemoveSuspendChecks(graph);
- RunCodeBaseline(target_isa, graph, has_result, expected);
+ for (InstructionSet target_isa : GetTargetISAs()) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ HGraph* graph = CreateGraph(&arena);
+ HGraphBuilder builder(graph);
+ const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+ bool graph_built = builder.BuildGraph(*item);
+ ASSERT_TRUE(graph_built);
+ // Remove suspend checks, they cannot be executed in this context.
+ RemoveSuspendChecks(graph);
+ TransformToSsa(graph);
+ RunCode(target_isa, graph, [](HGraph*) {}, has_result, expected);
+ }
}
-static void TestCodeLong(InstructionSet target_isa,
- const uint16_t* data,
+static void TestCodeLong(const uint16_t* data,
bool has_result,
int64_t expected) {
- ArenaPool pool;
- ArenaAllocator arena(&pool);
- HGraph* graph = CreateGraph(&arena);
- HGraphBuilder builder(graph, Primitive::kPrimLong);
- const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
- bool graph_built = builder.BuildGraph(*item);
- ASSERT_TRUE(graph_built);
- // Remove suspend checks, they cannot be executed in this context.
- RemoveSuspendChecks(graph);
- RunCodeBaseline(target_isa, graph, has_result, expected);
+ for (InstructionSet target_isa : GetTargetISAs()) {
+ ArenaPool pool;
+ ArenaAllocator arena(&pool);
+ HGraph* graph = CreateGraph(&arena);
+ HGraphBuilder builder(graph, Primitive::kPrimLong);
+ const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
+ bool graph_built = builder.BuildGraph(*item);
+ ASSERT_TRUE(graph_built);
+ // Remove suspend checks, they cannot be executed in this context.
+ RemoveSuspendChecks(graph);
+ TransformToSsa(graph);
+ RunCode(target_isa, graph, [](HGraph*) {}, has_result, expected);
+ }
}
-class CodegenTest: public ::testing::TestWithParam<InstructionSet> {};
+class CodegenTest : public CommonCompilerTest {};
-TEST_P(CodegenTest, ReturnVoid) {
+TEST_F(CodegenTest, ReturnVoid) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(Instruction::RETURN_VOID);
- TestCode(GetParam(), data);
+ TestCode(data);
}
-TEST_P(CodegenTest, CFG1) {
+TEST_F(CodegenTest, CFG1) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
- TestCode(GetParam(), data);
+ TestCode(data);
}
-TEST_P(CodegenTest, CFG2) {
+TEST_F(CodegenTest, CFG2) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x100,
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
- TestCode(GetParam(), data);
+ TestCode(data);
}
-TEST_P(CodegenTest, CFG3) {
+TEST_F(CodegenTest, CFG3) {
const uint16_t data1[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO | 0x200,
Instruction::RETURN_VOID,
Instruction::GOTO | 0xFF00);
- TestCode(GetParam(), data1);
+ TestCode(data1);
const uint16_t data2[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_16, 3,
Instruction::RETURN_VOID,
Instruction::GOTO_16, 0xFFFF);
- TestCode(GetParam(), data2);
+ TestCode(data2);
const uint16_t data3[] = ZERO_REGISTER_CODE_ITEM(
Instruction::GOTO_32, 4, 0,
Instruction::RETURN_VOID,
Instruction::GOTO_32, 0xFFFF, 0xFFFF);
- TestCode(GetParam(), data3);
+ TestCode(data3);
}
-TEST_P(CodegenTest, CFG4) {
+TEST_F(CodegenTest, CFG4) {
const uint16_t data[] = ZERO_REGISTER_CODE_ITEM(
Instruction::RETURN_VOID,
Instruction::GOTO | 0x100,
Instruction::GOTO | 0xFE00);
- TestCode(GetParam(), data);
+ TestCode(data);
}
-TEST_P(CodegenTest, CFG5) {
+TEST_F(CodegenTest, CFG5) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::IF_EQ, 3,
Instruction::GOTO | 0x100,
Instruction::RETURN_VOID);
- TestCode(GetParam(), data);
+ TestCode(data);
}
-TEST_P(CodegenTest, IntConstant) {
+TEST_F(CodegenTest, IntConstant) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN_VOID);
- TestCode(GetParam(), data);
+ TestCode(data);
}
-TEST_P(CodegenTest, Return1) {
+TEST_F(CodegenTest, Return1) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::RETURN | 0);
- TestCode(GetParam(), data, true, 0);
+ TestCode(data, true, 0);
}
-TEST_P(CodegenTest, Return2) {
+TEST_F(CodegenTest, Return2) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 0 | 1 << 8,
Instruction::RETURN | 1 << 8);
- TestCode(GetParam(), data, true, 0);
+ TestCode(data, true, 0);
}
-TEST_P(CodegenTest, Return3) {
+TEST_F(CodegenTest, Return3) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
Instruction::RETURN | 1 << 8);
- TestCode(GetParam(), data, true, 1);
+ TestCode(data, true, 1);
}
-TEST_P(CodegenTest, ReturnIf1) {
+TEST_F(CodegenTest, ReturnIf1) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
@@ -460,10 +430,10 @@
Instruction::RETURN | 0 << 8,
Instruction::RETURN | 1 << 8);
- TestCode(GetParam(), data, true, 1);
+ TestCode(data, true, 1);
}
-TEST_P(CodegenTest, ReturnIf2) {
+TEST_F(CodegenTest, ReturnIf2) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 0 | 0,
Instruction::CONST_4 | 1 << 8 | 1 << 12,
@@ -471,12 +441,12 @@
Instruction::RETURN | 0 << 8,
Instruction::RETURN | 1 << 8);
- TestCode(GetParam(), data, true, 0);
+ TestCode(data, true, 0);
}
// Exercise bit-wise (one's complement) not-int instruction.
#define NOT_INT_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \
-TEST_P(CodegenTest, TEST_NAME) { \
+TEST_F(CodegenTest, TEST_NAME) { \
const int32_t input = INPUT; \
const uint16_t input_lo = Low16Bits(input); \
const uint16_t input_hi = High16Bits(input); \
@@ -485,7 +455,7 @@
Instruction::NOT_INT | 1 << 8 | 0 << 12 , \
Instruction::RETURN | 1 << 8); \
\
- TestCode(GetParam(), data, true, EXPECTED_OUTPUT); \
+ TestCode(data, true, EXPECTED_OUTPUT); \
}
NOT_INT_TEST(ReturnNotIntMinus2, -2, 1)
@@ -501,7 +471,7 @@
// Exercise bit-wise (one's complement) not-long instruction.
#define NOT_LONG_TEST(TEST_NAME, INPUT, EXPECTED_OUTPUT) \
-TEST_P(CodegenTest, TEST_NAME) { \
+TEST_F(CodegenTest, TEST_NAME) { \
const int64_t input = INPUT; \
const uint16_t word0 = Low16Bits(Low32Bits(input)); /* LSW. */ \
const uint16_t word1 = High16Bits(Low32Bits(input)); \
@@ -512,7 +482,7 @@
Instruction::NOT_LONG | 2 << 8 | 0 << 12, \
Instruction::RETURN_WIDE | 2 << 8); \
\
- TestCodeLong(GetParam(), data, true, EXPECTED_OUTPUT); \
+ TestCodeLong(data, true, EXPECTED_OUTPUT); \
}
NOT_LONG_TEST(ReturnNotLongMinus2, INT64_C(-2), INT64_C(1))
@@ -551,7 +521,7 @@
#undef NOT_LONG_TEST
-TEST_P(CodegenTest, IntToLongOfLongToInt) {
+TEST_F(CodegenTest, IntToLongOfLongToInt) {
const int64_t input = INT64_C(4294967296); // 2^32
const uint16_t word0 = Low16Bits(Low32Bits(input)); // LSW.
const uint16_t word1 = High16Bits(Low32Bits(input));
@@ -565,192 +535,146 @@
Instruction::INT_TO_LONG | 2 << 8 | 4 << 12,
Instruction::RETURN_WIDE | 2 << 8);
- TestCodeLong(GetParam(), data, true, 1);
+ TestCodeLong(data, true, 1);
}
-TEST_P(CodegenTest, ReturnAdd1) {
+TEST_F(CodegenTest, ReturnAdd1) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::ADD_INT, 1 << 8 | 0,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 7);
+ TestCode(data, true, 7);
}
-TEST_P(CodegenTest, ReturnAdd2) {
+TEST_F(CodegenTest, ReturnAdd2) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::ADD_INT_2ADDR | 1 << 12,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 7);
+ TestCode(data, true, 7);
}
-TEST_P(CodegenTest, ReturnAdd3) {
+TEST_F(CodegenTest, ReturnAdd3) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::ADD_INT_LIT8, 3 << 8 | 0,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 7);
+ TestCode(data, true, 7);
}
-TEST_P(CodegenTest, ReturnAdd4) {
+TEST_F(CodegenTest, ReturnAdd4) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::ADD_INT_LIT16, 3,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 7);
+ TestCode(data, true, 7);
}
-TEST_P(CodegenTest, NonMaterializedCondition) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
-
- HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry);
- graph->SetEntryBlock(entry);
- entry->AddInstruction(new (&allocator) HGoto());
-
- HBasicBlock* first_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(first_block);
- entry->AddSuccessor(first_block);
- HIntConstant* constant0 = graph->GetIntConstant(0);
- HIntConstant* constant1 = graph->GetIntConstant(1);
- HEqual* equal = new (&allocator) HEqual(constant0, constant0);
- first_block->AddInstruction(equal);
- first_block->AddInstruction(new (&allocator) HIf(equal));
-
- HBasicBlock* then = new (&allocator) HBasicBlock(graph);
- HBasicBlock* else_ = new (&allocator) HBasicBlock(graph);
- HBasicBlock* exit = new (&allocator) HBasicBlock(graph);
-
- graph->AddBlock(then);
- graph->AddBlock(else_);
- graph->AddBlock(exit);
- first_block->AddSuccessor(then);
- first_block->AddSuccessor(else_);
- then->AddSuccessor(exit);
- else_->AddSuccessor(exit);
-
- exit->AddInstruction(new (&allocator) HExit());
- then->AddInstruction(new (&allocator) HReturn(constant0));
- else_->AddInstruction(new (&allocator) HReturn(constant1));
-
- ASSERT_TRUE(equal->NeedsMaterialization());
- graph->BuildDominatorTree();
- PrepareForRegisterAllocation(graph).Run();
- ASSERT_FALSE(equal->NeedsMaterialization());
-
- auto hook_before_codegen = [](HGraph* graph_in) {
- HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
- HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
- block->InsertInstructionBefore(move, block->GetLastInstruction());
- };
-
- RunCodeOptimized(GetParam(), graph, hook_before_codegen, true, 0);
-}
-
-TEST_P(CodegenTest, ReturnMulInt) {
+TEST_F(CodegenTest, ReturnMulInt) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::MUL_INT, 1 << 8 | 0,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 12);
+ TestCode(data, true, 12);
}
-TEST_P(CodegenTest, ReturnMulInt2addr) {
+TEST_F(CodegenTest, ReturnMulInt2addr) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 3 << 12 | 0,
Instruction::CONST_4 | 4 << 12 | 1 << 8,
Instruction::MUL_INT_2ADDR | 1 << 12,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 12);
+ TestCode(data, true, 12);
}
-TEST_P(CodegenTest, ReturnMulLong) {
+TEST_F(CodegenTest, ReturnMulLong) {
const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM(
- Instruction::CONST_4 | 3 << 12 | 0,
- Instruction::CONST_4 | 0 << 12 | 1 << 8,
- Instruction::CONST_4 | 4 << 12 | 2 << 8,
- Instruction::CONST_4 | 0 << 12 | 3 << 8,
+ Instruction::CONST_WIDE | 0 << 8, 3, 0, 0, 0,
+ Instruction::CONST_WIDE | 2 << 8, 4, 0, 0, 0,
Instruction::MUL_LONG, 2 << 8 | 0,
Instruction::RETURN_WIDE);
- TestCodeLong(GetParam(), data, true, 12);
+ TestCodeLong(data, true, 12);
}
-TEST_P(CodegenTest, ReturnMulLong2addr) {
+TEST_F(CodegenTest, ReturnMulLong2addr) {
const uint16_t data[] = FOUR_REGISTERS_CODE_ITEM(
- Instruction::CONST_4 | 3 << 12 | 0 << 8,
- Instruction::CONST_4 | 0 << 12 | 1 << 8,
- Instruction::CONST_4 | 4 << 12 | 2 << 8,
- Instruction::CONST_4 | 0 << 12 | 3 << 8,
+ Instruction::CONST_WIDE | 0 << 8, 3, 0, 0, 0,
+ Instruction::CONST_WIDE | 2 << 8, 4, 0, 0, 0,
Instruction::MUL_LONG_2ADDR | 2 << 12,
Instruction::RETURN_WIDE);
- TestCodeLong(GetParam(), data, true, 12);
+ TestCodeLong(data, true, 12);
}
-TEST_P(CodegenTest, ReturnMulIntLit8) {
+TEST_F(CodegenTest, ReturnMulIntLit8) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT8, 3 << 8 | 0,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 12);
+ TestCode(data, true, 12);
}
-TEST_P(CodegenTest, ReturnMulIntLit16) {
+TEST_F(CodegenTest, ReturnMulIntLit16) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT16, 3,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 12);
+ TestCode(data, true, 12);
}
-TEST_P(CodegenTest, MaterializedCondition1) {
- // Check that condition are materialized correctly. A materialized condition
- // should yield `1` if it evaluated to true, and `0` otherwise.
- // We force the materialization of comparisons for different combinations of
- // inputs and check the results.
-
- int lhs[] = {1, 2, -1, 2, 0xabc};
- int rhs[] = {2, 1, 2, -1, 0xabc};
-
- for (size_t i = 0; i < arraysize(lhs); i++) {
+TEST_F(CodegenTest, NonMaterializedCondition) {
+ for (InstructionSet target_isa : GetTargetISAs()) {
ArenaPool pool;
ArenaAllocator allocator(&pool);
+
HGraph* graph = CreateGraph(&allocator);
+ HBasicBlock* entry = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry);
+ graph->SetEntryBlock(entry);
+ entry->AddInstruction(new (&allocator) HGoto());
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry_block);
- graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
- HBasicBlock* code_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(code_block);
+ HBasicBlock* first_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(first_block);
+ entry->AddSuccessor(first_block);
+ HIntConstant* constant0 = graph->GetIntConstant(0);
+ HIntConstant* constant1 = graph->GetIntConstant(1);
+ HEqual* equal = new (&allocator) HEqual(constant0, constant0);
+ first_block->AddInstruction(equal);
+ first_block->AddInstruction(new (&allocator) HIf(equal));
+
+ HBasicBlock* then_block = new (&allocator) HBasicBlock(graph);
+ HBasicBlock* else_block = new (&allocator) HBasicBlock(graph);
HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
-
- entry_block->AddSuccessor(code_block);
- code_block->AddSuccessor(exit_block);
graph->SetExitBlock(exit_block);
- HIntConstant* cst_lhs = graph->GetIntConstant(lhs[i]);
- HIntConstant* cst_rhs = graph->GetIntConstant(rhs[i]);
- HLessThan cmp_lt(cst_lhs, cst_rhs);
- code_block->AddInstruction(&cmp_lt);
- HReturn ret(&cmp_lt);
- code_block->AddInstruction(&ret);
+ graph->AddBlock(then_block);
+ graph->AddBlock(else_block);
+ graph->AddBlock(exit_block);
+ first_block->AddSuccessor(then_block);
+ first_block->AddSuccessor(else_block);
+ then_block->AddSuccessor(exit_block);
+ else_block->AddSuccessor(exit_block);
+
+ exit_block->AddInstruction(new (&allocator) HExit());
+ then_block->AddInstruction(new (&allocator) HReturn(constant0));
+ else_block->AddInstruction(new (&allocator) HReturn(constant1));
+
+ ASSERT_TRUE(equal->NeedsMaterialization());
+ TransformToSsa(graph);
+ PrepareForRegisterAllocation(graph).Run();
+ ASSERT_FALSE(equal->NeedsMaterialization());
auto hook_before_codegen = [](HGraph* graph_in) {
HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
@@ -758,93 +682,143 @@
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
- RunCodeOptimized(GetParam(), graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ RunCode(target_isa, graph, hook_before_codegen, true, 0);
}
}
-TEST_P(CodegenTest, MaterializedCondition2) {
- // Check that HIf correctly interprets a materialized condition.
- // We force the materialization of comparisons for different combinations of
- // inputs. An HIf takes the materialized combination as input and returns a
- // value that we verify.
+TEST_F(CodegenTest, MaterializedCondition1) {
+ for (InstructionSet target_isa : GetTargetISAs()) {
+    // Check that conditions are materialized correctly. A materialized condition
+    // should yield `1` if it evaluates to true, and `0` otherwise.
+ // We force the materialization of comparisons for different combinations of
- int lhs[] = {1, 2, -1, 2, 0xabc};
- int rhs[] = {2, 1, 2, -1, 0xabc};
+ // inputs and check the results.
+ int lhs[] = {1, 2, -1, 2, 0xabc};
+ int rhs[] = {2, 1, 2, -1, 0xabc};
- for (size_t i = 0; i < arraysize(lhs); i++) {
- ArenaPool pool;
- ArenaAllocator allocator(&pool);
- HGraph* graph = CreateGraph(&allocator);
+ for (size_t i = 0; i < arraysize(lhs); i++) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = CreateGraph(&allocator);
- HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(entry_block);
- graph->SetEntryBlock(entry_block);
- entry_block->AddInstruction(new (&allocator) HGoto());
+ HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry_block);
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddInstruction(new (&allocator) HGoto());
+ HBasicBlock* code_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(code_block);
+ HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(exit_block);
+ exit_block->AddInstruction(new (&allocator) HExit());
- HBasicBlock* if_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(if_block);
- HBasicBlock* if_true_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(if_true_block);
- HBasicBlock* if_false_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(if_false_block);
- HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
- graph->AddBlock(exit_block);
- exit_block->AddInstruction(new (&allocator) HExit());
+ entry_block->AddSuccessor(code_block);
+ code_block->AddSuccessor(exit_block);
+ graph->SetExitBlock(exit_block);
- graph->SetEntryBlock(entry_block);
- entry_block->AddSuccessor(if_block);
- if_block->AddSuccessor(if_true_block);
- if_block->AddSuccessor(if_false_block);
- if_true_block->AddSuccessor(exit_block);
- if_false_block->AddSuccessor(exit_block);
- graph->SetExitBlock(exit_block);
+ HIntConstant* cst_lhs = graph->GetIntConstant(lhs[i]);
+ HIntConstant* cst_rhs = graph->GetIntConstant(rhs[i]);
+ HLessThan cmp_lt(cst_lhs, cst_rhs);
+ code_block->AddInstruction(&cmp_lt);
+ HReturn ret(&cmp_lt);
+ code_block->AddInstruction(&ret);
- HIntConstant* cst_lhs = graph->GetIntConstant(lhs[i]);
- HIntConstant* cst_rhs = graph->GetIntConstant(rhs[i]);
- HLessThan cmp_lt(cst_lhs, cst_rhs);
- if_block->AddInstruction(&cmp_lt);
- // We insert a temporary to separate the HIf from the HLessThan and force
- // the materialization of the condition.
- HTemporary force_materialization(0);
- if_block->AddInstruction(&force_materialization);
- HIf if_lt(&cmp_lt);
- if_block->AddInstruction(&if_lt);
-
- HIntConstant* cst_lt = graph->GetIntConstant(1);
- HReturn ret_lt(cst_lt);
- if_true_block->AddInstruction(&ret_lt);
- HIntConstant* cst_ge = graph->GetIntConstant(0);
- HReturn ret_ge(cst_ge);
- if_false_block->AddInstruction(&ret_ge);
-
- auto hook_before_codegen = [](HGraph* graph_in) {
- HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
- HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
- block->InsertInstructionBefore(move, block->GetLastInstruction());
- };
-
- RunCodeOptimized(GetParam(), graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ TransformToSsa(graph);
+ auto hook_before_codegen = [](HGraph* graph_in) {
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
+ HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
+ block->InsertInstructionBefore(move, block->GetLastInstruction());
+ };
+ RunCode(target_isa, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ }
}
}
-TEST_P(CodegenTest, ReturnDivIntLit8) {
+TEST_F(CodegenTest, MaterializedCondition2) {
+ for (InstructionSet target_isa : GetTargetISAs()) {
+ // Check that HIf correctly interprets a materialized condition.
+ // We force the materialization of comparisons for different combinations of
+    // inputs. An HIf takes the materialized condition as input and returns a
+ // value that we verify.
+
+ int lhs[] = {1, 2, -1, 2, 0xabc};
+ int rhs[] = {2, 1, 2, -1, 0xabc};
+
+ for (size_t i = 0; i < arraysize(lhs); i++) {
+ ArenaPool pool;
+ ArenaAllocator allocator(&pool);
+ HGraph* graph = CreateGraph(&allocator);
+
+ HBasicBlock* entry_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(entry_block);
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddInstruction(new (&allocator) HGoto());
+
+ HBasicBlock* if_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(if_block);
+ HBasicBlock* if_true_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(if_true_block);
+ HBasicBlock* if_false_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(if_false_block);
+ HBasicBlock* exit_block = new (&allocator) HBasicBlock(graph);
+ graph->AddBlock(exit_block);
+ exit_block->AddInstruction(new (&allocator) HExit());
+
+ graph->SetEntryBlock(entry_block);
+ entry_block->AddSuccessor(if_block);
+ if_block->AddSuccessor(if_true_block);
+ if_block->AddSuccessor(if_false_block);
+ if_true_block->AddSuccessor(exit_block);
+ if_false_block->AddSuccessor(exit_block);
+ graph->SetExitBlock(exit_block);
+
+ HIntConstant* cst_lhs = graph->GetIntConstant(lhs[i]);
+ HIntConstant* cst_rhs = graph->GetIntConstant(rhs[i]);
+ HLessThan cmp_lt(cst_lhs, cst_rhs);
+ if_block->AddInstruction(&cmp_lt);
+ // We insert a temporary to separate the HIf from the HLessThan and force
+ // the materialization of the condition.
+ HTemporary force_materialization(0);
+ if_block->AddInstruction(&force_materialization);
+ HIf if_lt(&cmp_lt);
+ if_block->AddInstruction(&if_lt);
+
+ HIntConstant* cst_lt = graph->GetIntConstant(1);
+ HReturn ret_lt(cst_lt);
+ if_true_block->AddInstruction(&ret_lt);
+ HIntConstant* cst_ge = graph->GetIntConstant(0);
+ HReturn ret_ge(cst_ge);
+ if_false_block->AddInstruction(&ret_ge);
+
+ TransformToSsa(graph);
+ auto hook_before_codegen = [](HGraph* graph_in) {
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors()[0];
+ HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
+ block->InsertInstructionBefore(move, block->GetLastInstruction());
+ };
+ RunCode(target_isa, graph, hook_before_codegen, true, lhs[i] < rhs[i]);
+ }
+ }
+}
+
+TEST_F(CodegenTest, ReturnDivIntLit8) {
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::DIV_INT_LIT8, 3 << 8 | 0,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 1);
+ TestCode(data, true, 1);
}
-TEST_P(CodegenTest, ReturnDivInt2Addr) {
+TEST_F(CodegenTest, ReturnDivInt2Addr) {
const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0,
Instruction::CONST_4 | 2 << 12 | 1 << 8,
Instruction::DIV_INT_2ADDR | 1 << 12,
Instruction::RETURN);
- TestCode(GetParam(), data, true, 2);
+ TestCode(data, true, 2);
}
// Helper method.
@@ -933,80 +907,55 @@
block->AddInstruction(comparison);
block->AddInstruction(new (&allocator) HReturn(comparison));
- auto hook_before_codegen = [](HGraph*) {
- };
- RunCodeOptimized(target_isa, graph, hook_before_codegen, true, expected_result);
+ TransformToSsa(graph);
+ RunCode(target_isa, graph, [](HGraph*) {}, true, expected_result);
}
-TEST_P(CodegenTest, ComparisonsInt) {
- const InstructionSet target_isa = GetParam();
- for (int64_t i = -1; i <= 1; i++) {
- for (int64_t j = -1; j <= 1; j++) {
- TestComparison(kCondEQ, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondNE, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondLT, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondLE, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondGT, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondGE, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondB, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondBE, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondA, i, j, Primitive::kPrimInt, target_isa);
- TestComparison(kCondAE, i, j, Primitive::kPrimInt, target_isa);
+TEST_F(CodegenTest, ComparisonsInt) {
+ for (InstructionSet target_isa : GetTargetISAs()) {
+ for (int64_t i = -1; i <= 1; i++) {
+ for (int64_t j = -1; j <= 1; j++) {
+ TestComparison(kCondEQ, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondNE, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondLT, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondLE, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondGT, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondGE, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondB, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondBE, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondA, i, j, Primitive::kPrimInt, target_isa);
+ TestComparison(kCondAE, i, j, Primitive::kPrimInt, target_isa);
+ }
}
}
}
-TEST_P(CodegenTest, ComparisonsLong) {
+TEST_F(CodegenTest, ComparisonsLong) {
// TODO: make MIPS work for long
if (kRuntimeISA == kMips || kRuntimeISA == kMips64) {
return;
}
- const InstructionSet target_isa = GetParam();
- if (target_isa == kMips || target_isa == kMips64) {
- return;
- }
+ for (InstructionSet target_isa : GetTargetISAs()) {
+ if (target_isa == kMips || target_isa == kMips64) {
+ continue;
+ }
- for (int64_t i = -1; i <= 1; i++) {
- for (int64_t j = -1; j <= 1; j++) {
- TestComparison(kCondEQ, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondNE, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondLT, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondLE, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondGT, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondGE, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondB, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondBE, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondA, i, j, Primitive::kPrimLong, target_isa);
- TestComparison(kCondAE, i, j, Primitive::kPrimLong, target_isa);
+ for (int64_t i = -1; i <= 1; i++) {
+ for (int64_t j = -1; j <= 1; j++) {
+ TestComparison(kCondEQ, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondNE, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondLT, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondLE, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondGT, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondGE, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondB, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondBE, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondA, i, j, Primitive::kPrimLong, target_isa);
+ TestComparison(kCondAE, i, j, Primitive::kPrimLong, target_isa);
+ }
}
}
}
-static ::std::vector<InstructionSet> GetTargetISAs() {
- ::std::vector<InstructionSet> v;
- // Add all ISAs that are executable on hardware or on simulator.
- const ::std::vector<InstructionSet> executable_isa_candidates = {
- kArm,
- kArm64,
- kThumb2,
- kX86,
- kX86_64,
- kMips,
- kMips64
- };
-
- for (auto target_isa : executable_isa_candidates) {
- if (CanExecute(target_isa)) {
- v.push_back(target_isa);
- }
- }
-
- return v;
-}
-
-INSTANTIATE_TEST_CASE_P(MultipleTargets,
- CodegenTest,
- ::testing::ValuesIn(GetTargetISAs()));
-
} // namespace art
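
To make the new test shape easy to see in one place: every TEST_F above now funnels into TestCode/TestCodeLong, and those helpers loop over the executable ISAs themselves instead of receiving one via TEST_P. A minimal sketch of the int variant, pieced together from the hunks above (the default arguments and exact parameter types are assumptions inferred from the call sites, not copied from the file):

static void TestCode(const uint16_t* data, bool has_result = false, int32_t expected = 0) {
  for (InstructionSet target_isa : GetTargetISAs()) {
    ArenaPool pool;
    ArenaAllocator arena(&pool);
    HGraph* graph = CreateGraph(&arena);
    HGraphBuilder builder(graph);
    const DexFile::CodeItem* item = reinterpret_cast<const DexFile::CodeItem*>(data);
    ASSERT_TRUE(builder.BuildGraph(*item));
    // Suspend checks cannot be executed in this context, so drop them.
    RemoveSuspendChecks(graph);
    // With the baseline path gone, every graph goes through SSA before codegen.
    TransformToSsa(graph);
    RunCode(target_isa, graph, [](HGraph*) {}, has_result, expected);
  }
}
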
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index 9f50d18..3bf3f7f 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -85,9 +85,9 @@
InvokeDexCallingConventionVisitor* calling_convention_visitor) {
if (kIsDebugBuild && invoke->IsInvokeStaticOrDirect()) {
HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
- // When we do not run baseline, explicit clinit checks triggered by static
- // invokes must have been pruned by art::PrepareForRegisterAllocation.
- DCHECK(codegen->IsBaseline() || !invoke_static_or_direct->IsStaticWithExplicitClinitCheck());
+ // Explicit clinit checks triggered by static invokes must have been
+ // pruned by art::PrepareForRegisterAllocation.
+ DCHECK(!invoke_static_or_direct->IsStaticWithExplicitClinitCheck());
}
if (invoke->GetNumberOfArguments() == 0) {
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index bb840ea..eeffe93 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -305,30 +305,19 @@
SHARED_REQUIRES(Locks::mutator_lock_);
private:
- // Whether we should run any optimization or register allocation. If false, will
- // just run the code generation after the graph was built.
- const bool run_optimizations_;
-
// Create a 'CompiledMethod' for an optimized graph.
- CompiledMethod* EmitOptimized(ArenaAllocator* arena,
- CodeVectorAllocator* code_allocator,
- CodeGenerator* codegen,
- CompilerDriver* driver) const;
-
- // Create a 'CompiledMethod' for a non-optimized graph.
- CompiledMethod* EmitBaseline(ArenaAllocator* arena,
- CodeVectorAllocator* code_allocator,
- CodeGenerator* codegen,
- CompilerDriver* driver) const;
+ CompiledMethod* Emit(ArenaAllocator* arena,
+ CodeVectorAllocator* code_allocator,
+ CodeGenerator* codegen,
+ CompilerDriver* driver) const;
// Try compiling a method and return the code generator used for
// compiling it.
// This method:
// 1) Builds the graph. Returns null if it failed to build it.
- // 2) If `run_optimizations_` is set:
- // 2.1) Transform the graph to SSA. Returns null if it failed.
- // 2.2) Run optimizations on the graph, including register allocator.
- // 3) Generate code with the `code_allocator` provided.
+ // 2) Transforms the graph to SSA. Returns null if it failed.
+ //  3) Runs optimizations on the graph, including the register allocator.
+ // 4) Generates code with the `code_allocator` provided.
CodeGenerator* TryCompile(ArenaAllocator* arena,
CodeVectorAllocator* code_allocator,
const DexFile::CodeItem* code_item,
@@ -350,9 +339,7 @@
static const int kMaximumCompilationTimeBeforeWarning = 100; /* ms */
OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver)
- : Compiler(driver, kMaximumCompilationTimeBeforeWarning),
- run_optimizations_(
- driver->GetCompilerOptions().GetCompilerFilter() != CompilerOptions::kTime) {}
+ : Compiler(driver, kMaximumCompilationTimeBeforeWarning) {}
void OptimizingCompiler::Init() {
// Enable C1visualizer output. Must be done in Init() because the compiler
@@ -577,17 +564,6 @@
AllocateRegisters(graph, codegen, pass_observer);
}
-// The stack map we generate must be 4-byte aligned on ARM. Since existing
-// maps are generated alongside these stack maps, we must also align them.
-static ArrayRef<const uint8_t> AlignVectorSize(ArenaVector<uint8_t>& vector) {
- size_t size = vector.size();
- size_t aligned_size = RoundUp(size, 4);
- for (; size < aligned_size; ++size) {
- vector.push_back(0);
- }
- return ArrayRef<const uint8_t>(vector);
-}
-
static ArenaVector<LinkerPatch> EmitAndSortLinkerPatches(CodeGenerator* codegen) {
ArenaVector<LinkerPatch> linker_patches(codegen->GetGraph()->GetArena()->Adapter());
codegen->EmitLinkerPatches(&linker_patches);
@@ -601,10 +577,10 @@
return linker_patches;
}
-CompiledMethod* OptimizingCompiler::EmitOptimized(ArenaAllocator* arena,
- CodeVectorAllocator* code_allocator,
- CodeGenerator* codegen,
- CompilerDriver* compiler_driver) const {
+CompiledMethod* OptimizingCompiler::Emit(ArenaAllocator* arena,
+ CodeVectorAllocator* code_allocator,
+ CodeGenerator* codegen,
+ CompilerDriver* compiler_driver) const {
ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
ArenaVector<uint8_t> stack_map(arena->Adapter(kArenaAllocStackMaps));
stack_map.resize(codegen->ComputeStackMapsSize());
@@ -630,39 +606,6 @@
return compiled_method;
}
-CompiledMethod* OptimizingCompiler::EmitBaseline(
- ArenaAllocator* arena,
- CodeVectorAllocator* code_allocator,
- CodeGenerator* codegen,
- CompilerDriver* compiler_driver) const {
- ArenaVector<LinkerPatch> linker_patches = EmitAndSortLinkerPatches(codegen);
-
- ArenaVector<uint8_t> mapping_table(arena->Adapter(kArenaAllocBaselineMaps));
- codegen->BuildMappingTable(&mapping_table);
- ArenaVector<uint8_t> vmap_table(arena->Adapter(kArenaAllocBaselineMaps));
- codegen->BuildVMapTable(&vmap_table);
- ArenaVector<uint8_t> gc_map(arena->Adapter(kArenaAllocBaselineMaps));
- codegen->BuildNativeGCMap(&gc_map, *compiler_driver);
-
- CompiledMethod* compiled_method = CompiledMethod::SwapAllocCompiledMethod(
- compiler_driver,
- codegen->GetInstructionSet(),
- ArrayRef<const uint8_t>(code_allocator->GetMemory()),
- // Follow Quick's behavior and set the frame size to zero if it is
- // considered "empty" (see the definition of
- // art::CodeGenerator::HasEmptyFrame).
- codegen->HasEmptyFrame() ? 0 : codegen->GetFrameSize(),
- codegen->GetCoreSpillMask(),
- codegen->GetFpuSpillMask(),
- ArrayRef<const SrcMapElem>(),
- AlignVectorSize(mapping_table),
- AlignVectorSize(vmap_table),
- AlignVectorSize(gc_map),
- ArrayRef<const uint8_t>(*codegen->GetAssembler()->cfi().data()),
- ArrayRef<const LinkerPatch>(linker_patches));
- return compiled_method;
-}
-
CodeGenerator* OptimizingCompiler::TryCompile(ArenaAllocator* arena,
CodeVectorAllocator* code_allocator,
const DexFile::CodeItem* code_item,
@@ -775,41 +718,37 @@
VLOG(compiler) << "Optimizing " << pass_observer.GetMethodName();
- if (run_optimizations_) {
- ScopedObjectAccess soa(Thread::Current());
- StackHandleScopeCollection handles(soa.Self());
- ScopedThreadSuspension sts(soa.Self(), kNative);
+ ScopedObjectAccess soa(Thread::Current());
+ StackHandleScopeCollection handles(soa.Self());
+ ScopedThreadSuspension sts(soa.Self(), kNative);
- {
- PassScope scope(SsaBuilder::kSsaBuilderPassName, &pass_observer);
- GraphAnalysisResult result = graph->TryBuildingSsa(&handles);
- if (result != kAnalysisSuccess) {
- switch (result) {
- case kAnalysisFailThrowCatchLoop:
- MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
- break;
- case kAnalysisFailAmbiguousArrayOp:
- MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
- break;
- case kAnalysisSuccess:
- UNREACHABLE();
- }
- pass_observer.SetGraphInBadState();
- return nullptr;
+ {
+ PassScope scope(SsaBuilder::kSsaBuilderPassName, &pass_observer);
+ GraphAnalysisResult result = graph->TryBuildingSsa(&handles);
+ if (result != kAnalysisSuccess) {
+ switch (result) {
+ case kAnalysisFailThrowCatchLoop:
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledThrowCatchLoop);
+ break;
+ case kAnalysisFailAmbiguousArrayOp:
+ MaybeRecordStat(MethodCompilationStat::kNotCompiledAmbiguousArrayOp);
+ break;
+ case kAnalysisSuccess:
+ UNREACHABLE();
}
+ pass_observer.SetGraphInBadState();
+ return nullptr;
}
-
- RunOptimizations(graph,
- codegen.get(),
- compiler_driver,
- compilation_stats_.get(),
- dex_compilation_unit,
- &pass_observer,
- &handles);
- codegen->CompileOptimized(code_allocator);
- } else {
- codegen->CompileBaseline(code_allocator);
}
+
+ RunOptimizations(graph,
+ codegen.get(),
+ compiler_driver,
+ compilation_stats_.get(),
+ dex_compilation_unit,
+ &pass_observer,
+ &handles);
+ codegen->Compile(code_allocator);
pass_observer.DumpDisassembly();
if (kArenaAllocatorCountAllocations) {
@@ -861,11 +800,7 @@
dex_cache));
if (codegen.get() != nullptr) {
MaybeRecordStat(MethodCompilationStat::kCompiled);
- if (run_optimizations_) {
- method = EmitOptimized(&arena, &code_allocator, codegen.get(), compiler_driver);
- } else {
- method = EmitBaseline(&arena, &code_allocator, codegen.get(), compiler_driver);
- }
+ method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver);
}
} else {
if (compiler_driver->GetCompilerOptions().VerifyAtRuntime()) {
@@ -928,8 +863,6 @@
{
// Go to native so that we don't block GC during compilation.
ScopedThreadSuspension sts(self, kNative);
-
- DCHECK(run_optimizations_);
codegen.reset(
TryCompile(&arena,
&code_allocator,
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 2bae4bc..a966b62 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -72,8 +72,7 @@
float_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
double_spill_slots_.reserve(kDefaultNumberOfSpillSlots);
- static constexpr bool kIsBaseline = false;
- codegen->SetupBlockedRegisters(kIsBaseline);
+ codegen->SetupBlockedRegisters();
physical_core_register_intervals_.resize(codegen->GetNumberOfCoreRegisters(), nullptr);
physical_fp_register_intervals_.resize(codegen->GetNumberOfFloatingPointRegisters(), nullptr);
// Always reserve for the current method and the graph's max out registers.
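
End-to-end, the hunks above leave a single compilation path: TryCompile builds the graph, transforms it to SSA, runs the optimizations (ending with register allocation, whose SetupBlockedRegisters() call no longer takes a baseline flag), and generates code; the caller then wraps the result with the one remaining Emit() helper. A condensed walk-through of that path, with the statements lifted from the hunks above and the surrounding control flow elided (treat it as an illustration, not the full implementation):

// 1) Build the graph; return nullptr if that fails.
// 2) Transform the graph to SSA via TryBuildingSsa(); on kAnalysisFailThrowCatchLoop
//    or kAnalysisFailAmbiguousArrayOp, record the stat, mark the graph as being in a
//    bad state, and return nullptr.
// 3) Run the optimizations, ending with register allocation:
RunOptimizations(graph, codegen.get(), compiler_driver, compilation_stats_.get(),
                 dex_compilation_unit, &pass_observer, &handles);
// 4) Generate code -- there is no baseline variant left:
codegen->Compile(code_allocator);
// On success the caller produces the CompiledMethod through the single Emit():
method = Emit(&arena, &code_allocator, codegen.get(), compiler_driver);
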