Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/builder.cc | 33
-rw-r--r--  compiler/optimizing/code_generator.cc | 4
-rw-r--r--  compiler/optimizing/code_generator.h | 16
-rw-r--r--  compiler/optimizing/code_generator_arm.cc | 183
-rw-r--r--  compiler/optimizing/code_generator_arm.h | 2
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc | 58
-rw-r--r--  compiler/optimizing/code_generator_arm64.h | 4
-rw-r--r--  compiler/optimizing/code_generator_x86.cc | 171
-rw-r--r--  compiler/optimizing/code_generator_x86.h | 2
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc | 153
-rw-r--r--  compiler/optimizing/code_generator_x86_64.h | 2
-rw-r--r--  compiler/optimizing/codegen_test.cc | 26
-rw-r--r--  compiler/optimizing/constant_folding.cc | 6
-rw-r--r--  compiler/optimizing/gvn.cc | 5
-rw-r--r--  compiler/optimizing/gvn.h | 4
-rw-r--r--  compiler/optimizing/locations.h | 44
-rw-r--r--  compiler/optimizing/nodes.cc | 12
-rw-r--r--  compiler/optimizing/nodes.h | 143
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc | 5
-rw-r--r--  compiler/optimizing/optimizing_unit_test.h | 2
-rw-r--r--  compiler/optimizing/parallel_move_resolver.cc | 6
-rw-r--r--  compiler/optimizing/parallel_move_test.cc | 4
-rw-r--r--  compiler/optimizing/prepare_for_register_allocation.cc | 14
-rw-r--r--  compiler/optimizing/register_allocator.cc | 133
-rw-r--r--  compiler/optimizing/register_allocator.h | 1
-rw-r--r--  compiler/optimizing/register_allocator_test.cc | 11
-rw-r--r--  compiler/optimizing/ssa_builder.cc | 4
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.cc | 26
-rw-r--r--  compiler/optimizing/ssa_liveness_analysis.h | 66
-rw-r--r--  compiler/optimizing/ssa_phi_elimination.cc | 22
-rw-r--r--  compiler/optimizing/stack_map_stream.h | 28
31 files changed, 896 insertions, 294 deletions
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index e4ccd9651b..d168fc80f1 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -505,11 +505,11 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
}
HLoadClass* constant = new (arena_) HLoadClass(
- storage_index, is_referrers_class, is_initialized, dex_offset);
+ storage_index, is_referrers_class, dex_offset);
current_block_->AddInstruction(constant);
HInstruction* cls = constant;
- if (constant->NeedsInitialization()) {
+ if (!is_initialized) {
cls = new (arena_) HClinitCheck(constant, dex_offset);
current_block_->AddInstruction(cls);
}
@@ -1173,6 +1173,35 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
break;
}
+ case Instruction::CONST_STRING: {
+ current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_21c(), dex_offset));
+ UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
+ break;
+ }
+
+ case Instruction::CONST_STRING_JUMBO: {
+ current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_31c(), dex_offset));
+ UpdateLocal(instruction.VRegA_31c(), current_block_->GetLastInstruction());
+ break;
+ }
+
+ case Instruction::CONST_CLASS: {
+ uint16_t type_index = instruction.VRegB_21c();
+ bool type_known_final;
+ bool type_known_abstract;
+ bool is_referrers_class;
+ bool can_access = compiler_driver_->CanAccessTypeWithoutChecks(
+ dex_compilation_unit_->GetDexMethodIndex(), *dex_file_, type_index,
+ &type_known_final, &type_known_abstract, &is_referrers_class);
+ if (!can_access) {
+ return false;
+ }
+ current_block_->AddInstruction(
+ new (arena_) HLoadClass(instruction.VRegB_21c(), is_referrers_class, dex_offset));
+ UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
+ break;
+ }
+
default:
return false;
}
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index c61e991956..ac72a333c3 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -122,8 +122,8 @@ size_t CodeGenerator::FindFreeEntry(bool* array, size_t length) {
return -1;
}
-size_t CodeGenerator::FindTwoFreeConsecutiveEntries(bool* array, size_t length) {
- for (size_t i = 0; i < length - 1; ++i) {
+size_t CodeGenerator::FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length) {
+ for (size_t i = 0; i < length - 1; i += 2) {
if (!array[i] && !array[i + 1]) {
array[i] = true;
array[i + 1] = true;
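
The renamed helper now steps the index by two, so a successful search always returns an even index; the ARM double-register case further down checks exactly that with DCHECK_EQ(reg % 2, 0) before forming an (S2n, S2n+1) pair. A minimal standalone sketch of the same search follows; the function name and the plain bool array standing in for the blocked-register table are assumptions for illustration only.

#include <cstddef>

// Sketch only: mirrors FindTwoFreeConsecutiveAlignedEntries above.
// Stepping by 2 guarantees the returned index is even, so an S-register
// pair (S2n, S2n+1) can back a double-precision D register.
static int FindAlignedFreePair(bool* used, std::size_t length) {
  for (std::size_t i = 0; i + 1 < length; i += 2) {
    if (!used[i] && !used[i + 1]) {
      used[i] = true;
      used[i + 1] = true;
      return static_cast<int>(i);
    }
  }
  return -1;  // no aligned free pair left
}
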
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index bf9d2c0c12..01c5cc9637 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -51,7 +51,7 @@ struct PcInfo {
uintptr_t native_pc;
};
-class SlowPathCode : public ArenaObject {
+class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
public:
SlowPathCode() {}
virtual ~SlowPathCode() {}
@@ -62,7 +62,7 @@ class SlowPathCode : public ArenaObject {
DISALLOW_COPY_AND_ASSIGN(SlowPathCode);
};
-class CodeGenerator : public ArenaObject {
+class CodeGenerator : public ArenaObject<kArenaAllocMisc> {
public:
// Compiles the graph to executable instructions. Returns whether the compilation
// succeeded.
@@ -115,12 +115,14 @@ class CodeGenerator : public ArenaObject {
// Restores the register from the stack. Returns the size taken on stack.
virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) = 0;
virtual size_t SaveFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
- LOG(FATAL) << "Unimplemented";
- return 0u;
+ UNUSED(stack_index, reg_id);
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
virtual size_t RestoreFloatingPointRegister(size_t stack_index, uint32_t reg_id) {
- LOG(FATAL) << "Unimplemented";
- return 0u;
+ UNUSED(stack_index, reg_id);
+ UNIMPLEMENTED(FATAL);
+ UNREACHABLE();
}
void RecordPcInfo(HInstruction* instruction, uint32_t dex_pc);
@@ -190,7 +192,7 @@ class CodeGenerator : public ArenaObject {
virtual Location AllocateFreeRegister(Primitive::Type type) const = 0;
static size_t FindFreeEntry(bool* array, size_t length);
- static size_t FindTwoFreeConsecutiveEntries(bool* array, size_t length);
+ static size_t FindTwoFreeConsecutiveAlignedEntries(bool* array, size_t length);
virtual Location GetStackLocation(HLoadLocal* load) const = 0;
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index a06860a5b6..6e6d64cbfc 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -170,33 +170,89 @@ class BoundsCheckSlowPathARM : public SlowPathCodeARM {
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
};
-class ClinitCheckSlowPathARM : public SlowPathCodeARM {
+class LoadClassSlowPathARM : public SlowPathCodeARM {
public:
- explicit ClinitCheckSlowPathARM(HClinitCheck* instruction) : instruction_(instruction) {}
+ LoadClassSlowPathARM(HLoadClass* cls,
+ HInstruction* at,
+ uint32_t dex_pc,
+ bool do_clinit)
+ : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ }
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = at_->GetLocations();
+
CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
__ Bind(GetEntryLabel());
- codegen->SaveLiveRegisters(instruction_->GetLocations());
+ codegen->SaveLiveRegisters(locations);
- HLoadClass* cls = instruction_->GetLoadClass();
InvokeRuntimeCallingConvention calling_convention;
- __ LoadImmediate(calling_convention.GetRegisterAt(0), cls->GetTypeIndex());
+ __ LoadImmediate(calling_convention.GetRegisterAt(0), cls_->GetTypeIndex());
arm_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
+ int32_t entry_point_offset = do_clinit_
+ ? QUICK_ENTRY_POINT(pInitializeStaticStorage)
+ : QUICK_ENTRY_POINT(pInitializeType);
+ arm_codegen->InvokeRuntime(entry_point_offset, at_, dex_pc_);
+
+ // Move the class to the desired location.
+ if (locations->Out().IsValid()) {
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+ }
+ codegen->RestoreLiveRegisters(locations);
+ __ b(GetExitLabel());
+ }
+
+ private:
+ // The class this slow path will load.
+ HLoadClass* const cls_;
+
+ // The instruction where this slow path is happening.
+ // (Might be the load class or an initialization check).
+ HInstruction* const at_;
+
+ // The dex PC of `at_`.
+ const uint32_t dex_pc_;
+
+ // Whether to initialize the class.
+ const bool do_clinit_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathARM);
+};
+
+class LoadStringSlowPathARM : public SlowPathCodeARM {
+ public:
+ explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ arm_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(0));
+ __ LoadImmediate(calling_convention.GetRegisterAt(1), instruction_->GetStringIndex());
arm_codegen->InvokeRuntime(
- QUICK_ENTRY_POINT(pInitializeStaticStorage), instruction_, instruction_->GetDexPc());
- arm_codegen->Move32(instruction_->GetLocations()->InAt(0), Location::RegisterLocation(R0));
- codegen->RestoreLiveRegisters(instruction_->GetLocations());
+ QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc());
+ arm_codegen->Move32(locations->Out(), Location::RegisterLocation(R0));
+
+ codegen->RestoreLiveRegisters(locations);
__ b(GetExitLabel());
}
private:
- HClinitCheck* const instruction_;
+ HLoadString* const instruction_;
- DISALLOW_COPY_AND_ASSIGN(ClinitCheckSlowPathARM);
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
};
#undef __
+
+#undef __
#define __ reinterpret_cast<ArmAssembler*>(GetAssembler())->
inline Condition ARMCondition(IfCondition cond) {
@@ -296,7 +352,8 @@ Location CodeGeneratorARM::AllocateFreeRegister(Primitive::Type type) const {
}
case Primitive::kPrimDouble: {
- int reg = FindTwoFreeConsecutiveEntries(blocked_fpu_registers_, kNumberOfSRegisters);
+ int reg = FindTwoFreeConsecutiveAlignedEntries(blocked_fpu_registers_, kNumberOfSRegisters);
+ DCHECK_EQ(reg % 2, 0);
return Location::FpuRegisterPairLocation(reg, reg + 1);
}
@@ -341,6 +398,14 @@ void CodeGeneratorARM::SetupBlockedRegisters() const {
blocked_fpu_registers_[S21] = true;
blocked_fpu_registers_[S22] = true;
blocked_fpu_registers_[S23] = true;
+ blocked_fpu_registers_[S24] = true;
+ blocked_fpu_registers_[S25] = true;
+ blocked_fpu_registers_[S26] = true;
+ blocked_fpu_registers_[S27] = true;
+ blocked_fpu_registers_[S28] = true;
+ blocked_fpu_registers_[S29] = true;
+ blocked_fpu_registers_[S30] = true;
+ blocked_fpu_registers_[S31] = true;
UpdateBlockedPairRegisters();
}
@@ -446,7 +511,7 @@ Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type
calling_convention.GetRegisterPairAt(index));
return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
} else if (index + 1 == calling_convention.GetNumberOfRegisters()) {
- return Location::QuickParameter(stack_index);
+ return Location::QuickParameter(index, stack_index);
} else {
return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(stack_index));
}
@@ -561,12 +626,13 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
} else if (source.IsFpuRegister()) {
UNIMPLEMENTED(FATAL);
} else if (source.IsQuickParameter()) {
- uint32_t argument_index = source.GetQuickParameterIndex();
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
InvokeDexCallingConvention calling_convention;
__ Mov(destination.AsRegisterPairLow<Register>(),
- calling_convention.GetRegisterAt(argument_index));
+ calling_convention.GetRegisterAt(register_index));
__ LoadFromOffset(kLoadWord, destination.AsRegisterPairHigh<Register>(),
- SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize());
+ SP, calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize());
} else {
DCHECK(source.IsDoubleStackSlot());
if (destination.AsRegisterPairLow<Register>() == R1) {
@@ -588,20 +654,21 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
}
} else if (destination.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = destination.GetQuickParameterIndex();
+ uint16_t register_index = destination.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = destination.GetQuickParameterStackIndex();
if (source.IsRegisterPair()) {
- __ Mov(calling_convention.GetRegisterAt(argument_index),
+ __ Mov(calling_convention.GetRegisterAt(register_index),
source.AsRegisterPairLow<Register>());
__ StoreToOffset(kStoreWord, source.AsRegisterPairHigh<Register>(),
- SP, calling_convention.GetStackOffsetOf(argument_index + 1));
+ SP, calling_convention.GetStackOffsetOf(stack_index + 1));
} else if (source.IsFpuRegister()) {
UNIMPLEMENTED(FATAL);
} else {
DCHECK(source.IsDoubleStackSlot());
__ LoadFromOffset(
- kLoadWord, calling_convention.GetRegisterAt(argument_index), SP, source.GetStackIndex());
+ kLoadWord, calling_convention.GetRegisterAt(register_index), SP, source.GetStackIndex());
__ LoadFromOffset(kLoadWord, R0, SP, source.GetHighStackIndex(kArmWordSize));
- __ StoreToOffset(kStoreWord, R0, SP, calling_convention.GetStackOffsetOf(argument_index + 1));
+ __ StoreToOffset(kStoreWord, R0, SP, calling_convention.GetStackOffsetOf(stack_index + 1));
}
} else {
DCHECK(destination.IsDoubleStackSlot());
@@ -616,11 +683,12 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
}
} else if (source.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = source.GetQuickParameterIndex();
- __ StoreToOffset(kStoreWord, calling_convention.GetRegisterAt(argument_index),
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
+ __ StoreToOffset(kStoreWord, calling_convention.GetRegisterAt(register_index),
SP, destination.GetStackIndex());
__ LoadFromOffset(kLoadWord, R0,
- SP, calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize());
+ SP, calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize());
__ StoreToOffset(kStoreWord, R0, SP, destination.GetHighStackIndex(kArmWordSize));
} else if (source.IsFpuRegisterPair()) {
__ StoreDToOffset(FromLowSToD(source.AsFpuRegisterPairLow<SRegister>()),
@@ -751,6 +819,7 @@ void LocationsBuilderARM::VisitExit(HExit* exit) {
}
void InstructionCodeGeneratorARM::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
__ Comment("Unreachable");
__ bkpt(0);
@@ -916,6 +985,7 @@ void LocationsBuilderARM::VisitLoadLocal(HLoadLocal* load) {
void InstructionCodeGeneratorARM::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
@@ -943,6 +1013,7 @@ void LocationsBuilderARM::VisitStoreLocal(HStoreLocal* store) {
}
void InstructionCodeGeneratorARM::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
@@ -953,6 +1024,7 @@ void LocationsBuilderARM::VisitIntConstant(HIntConstant* constant) {
void InstructionCodeGeneratorARM::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
@@ -963,6 +1035,7 @@ void LocationsBuilderARM::VisitLongConstant(HLongConstant* constant) {
void InstructionCodeGeneratorARM::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
@@ -973,6 +1046,7 @@ void LocationsBuilderARM::VisitFloatConstant(HFloatConstant* constant) {
void InstructionCodeGeneratorARM::VisitFloatConstant(HFloatConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -983,6 +1057,7 @@ void LocationsBuilderARM::VisitDoubleConstant(HDoubleConstant* constant) {
void InstructionCodeGeneratorARM::VisitDoubleConstant(HDoubleConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
@@ -990,6 +1065,7 @@ void LocationsBuilderARM::VisitReturnVoid(HReturnVoid* ret) {
}
void InstructionCodeGeneratorARM::VisitReturnVoid(HReturnVoid* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
}
@@ -1000,6 +1076,7 @@ void LocationsBuilderARM::VisitReturn(HReturn* ret) {
}
void InstructionCodeGeneratorARM::VisitReturn(HReturn* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
}
@@ -1465,6 +1542,7 @@ void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
void InstructionCodeGeneratorARM::VisitParameterValue(HParameterValue* instruction) {
// Nothing to do, the parameter is already at its location.
+ UNUSED(instruction);
}
void LocationsBuilderARM::VisitNot(HNot* not_) {
@@ -1508,7 +1586,6 @@ void LocationsBuilderARM::VisitCompare(HCompare* compare) {
}
void InstructionCodeGeneratorARM::VisitCompare(HCompare* compare) {
- Label greater, done;
LocationSummary* locations = compare->GetLocations();
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
@@ -1553,6 +1630,7 @@ void LocationsBuilderARM::VisitPhi(HPhi* instruction) {
}
void InstructionCodeGeneratorARM::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -1955,9 +2033,11 @@ void LocationsBuilderARM::VisitTemporary(HTemporary* temp) {
void InstructionCodeGeneratorARM::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
void LocationsBuilderARM::VisitParallelMove(HParallelMove* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -2087,21 +2167,38 @@ void ParallelMoveResolverARM::RestoreScratch(int reg) {
}
void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary::CallKind call_kind = cls->CanCallRuntime()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cls, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
Register out = cls->GetLocations()->Out().As<Register>();
if (cls->IsReferrersClass()) {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
codegen_->LoadCurrentMethod(out);
__ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
} else {
+ DCHECK(cls->CanCallRuntime());
codegen_->LoadCurrentMethod(out);
__ LoadFromOffset(
kLoadWord, out, out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
__ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
+
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
+ __ cmp(out, ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), EQ);
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
+ }
}
}
@@ -2115,17 +2212,15 @@ void LocationsBuilderARM::VisitClinitCheck(HClinitCheck* check) {
}
void InstructionCodeGeneratorARM::VisitClinitCheck(HClinitCheck* check) {
- SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) ClinitCheckSlowPathARM(check);
+ // We assume the class is not null.
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
+ check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
+ GenerateClassInitializationCheck(slow_path, check->GetLocations()->InAt(0).As<Register>());
+}
- LocationSummary* locations = check->GetLocations();
- // We remove the class as a live register, we know it's null or unused in the slow path.
- RegisterSet* register_set = locations->GetLiveRegisters();
- register_set->Remove(locations->InAt(0));
-
- Register class_reg = locations->InAt(0).As<Register>();
- __ cmp(class_reg, ShifterOperand(0));
- __ b(slow_path->GetEntryLabel(), EQ);
+void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
+ SlowPathCodeARM* slow_path, Register class_reg) {
__ LoadFromOffset(kLoadWord, IP, class_reg, mirror::Class::StatusOffset().Int32Value());
__ cmp(IP, ShifterOperand(mirror::Class::kStatusInitialized));
__ b(slow_path->GetEntryLabel(), LT);
@@ -2258,5 +2353,25 @@ void InstructionCodeGeneratorARM::VisitStaticFieldSet(HStaticFieldSet* instructi
}
}
+void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
+ SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
+ codegen_->AddSlowPath(slow_path);
+
+ Register out = load->GetLocations()->Out().As<Register>();
+ codegen_->LoadCurrentMethod(out);
+ __ LoadFromOffset(
+ kLoadWord, out, out, mirror::ArtMethod::DexCacheStringsOffset().Int32Value());
+ __ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
+ __ cmp(out, ShifterOperand(0));
+ __ b(slow_path->GetEntryLabel(), EQ);
+ __ Bind(slow_path->GetExitLabel());
+}
+
} // namespace arm
} // namespace art
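
The ARM changes above all feed one pattern: VisitLoadClass and VisitClinitCheck read the dex cache on the fast path and share a single LoadClassSlowPathARM, whose do_clinit flag picks between the pInitializeType and pInitializeStaticStorage entry points. A rough C++ sketch of that control flow follows; the Class type and the two runtime stand-ins are assumptions for illustration, not ART APIs.

#include <cstdint>

// Illustrative stand-ins, not ART types or entry points.
struct Class { bool initialized = false; };

static Class* ResolveType(uint32_t /*type_index*/) { return new Class(); }
static Class* InitializeStorage(uint32_t type_index) {
  Class* cls = ResolveType(type_index);
  cls->initialized = true;  // the real runtime also runs <clinit> here
  return cls;
}

// Mirrors the generated shape: dex-cache fast path, runtime slow path,
// with do_clinit selecting pInitializeStaticStorage over pInitializeType.
Class* LoadClass(Class* dex_cache_types[], uint32_t type_index, bool do_clinit) {
  Class* cls = dex_cache_types[type_index];  // fast path: already resolved
  if (cls == nullptr || (do_clinit && !cls->initialized)) {
    cls = do_clinit ? InitializeStorage(type_index) : ResolveType(type_index);  // slow path
    dex_cache_types[type_index] = cls;
  }
  return cls;
}
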
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index c65b42649e..5076a4bc38 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -26,6 +26,7 @@ namespace art {
namespace arm {
class CodeGeneratorARM;
+class SlowPathCodeARM;
static constexpr size_t kArmWordSize = 4;
@@ -131,6 +132,7 @@ class InstructionCodeGeneratorARM : public HGraphVisitor {
// is the block to branch to if the suspend check is not needed, and after
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
+ void GenerateClassInitializationCheck(SlowPathCodeARM* slow_path, Register class_reg);
ArmAssembler* const assembler_;
CodeGeneratorARM* const codegen_;
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index fe999c2be0..90d7c35975 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -538,8 +538,8 @@ InstructionCodeGeneratorARM64::InstructionCodeGeneratorARM64(HGraph* graph,
M(DoubleConstant) \
M(Div) \
M(FloatConstant) \
- M(Mul) \
M(LoadClass) \
+ M(LoadString) \
M(Neg) \
M(NewArray) \
M(ParallelMove) \
@@ -556,6 +556,7 @@ enum UnimplementedInstructionBreakCode {
#define DEFINE_UNIMPLEMENTED_INSTRUCTION_VISITORS(name) \
void InstructionCodeGeneratorARM64::Visit##name(H##name* instr) { \
+ UNUSED(instr); \
__ Brk(UNIMPLEMENTED_INSTRUCTION_BREAK_CODE(name)); \
} \
void LocationsBuilderARM64::Visit##name(H##name* instr) { \
@@ -711,6 +712,7 @@ void LocationsBuilderARM64::VisitExit(HExit* exit) {
}
void InstructionCodeGeneratorARM64::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
down_cast<Arm64Assembler*>(GetAssembler())->Comment("Unreachable");
__ Brk(0); // TODO: Introduce special markers for such code locations.
@@ -758,16 +760,16 @@ void InstructionCodeGeneratorARM64::VisitIf(HIf* if_instr) {
// the comparison and its condition as the branch condition.
Register lhs = InputRegisterAt(condition, 0);
Operand rhs = InputOperandAt(condition, 1);
- Condition cond = ARM64Condition(condition->GetCondition());
- if ((cond == eq || cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
- if (cond == eq) {
+ Condition arm64_cond = ARM64Condition(condition->GetCondition());
+ if ((arm64_cond == eq || arm64_cond == ne) && rhs.IsImmediate() && (rhs.immediate() == 0)) {
+ if (arm64_cond == eq) {
__ Cbz(lhs, true_target);
} else {
__ Cbnz(lhs, true_target);
}
} else {
__ Cmp(lhs, rhs);
- __ B(cond, true_target);
+ __ B(arm64_cond, true_target);
}
}
@@ -877,6 +879,7 @@ void LocationsBuilderARM64::VisitIntConstant(HIntConstant* constant) {
void InstructionCodeGeneratorARM64::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderARM64::VisitInvokeStatic(HInvokeStatic* invoke) {
@@ -967,6 +970,7 @@ void LocationsBuilderARM64::VisitLoadLocal(HLoadLocal* load) {
void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderARM64::VisitLocal(HLocal* local) {
@@ -984,6 +988,45 @@ void LocationsBuilderARM64::VisitLongConstant(HLongConstant* constant) {
void InstructionCodeGeneratorARM64::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
+}
+
+void LocationsBuilderARM64::VisitMul(HMul* mul) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(mul, LocationSummary::kNoCall);
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ locations->SetInAt(0, Location::RequiresRegister());
+ locations->SetInAt(1, Location::RequiresRegister());
+ locations->SetOut(Location::RequiresRegister());
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
+}
+
+void InstructionCodeGeneratorARM64::VisitMul(HMul* mul) {
+ switch (mul->GetResultType()) {
+ case Primitive::kPrimInt:
+ case Primitive::kPrimLong:
+ __ Mul(OutputRegister(mul), InputRegisterAt(mul, 0), InputRegisterAt(mul, 1));
+ break;
+
+ case Primitive::kPrimFloat:
+ case Primitive::kPrimDouble:
+ LOG(FATAL) << "Unimplemented mul type " << mul->GetResultType();
+ break;
+
+ default:
+ LOG(FATAL) << "Unexpected mul type " << mul->GetResultType();
+ }
}
void LocationsBuilderARM64::VisitNewInstance(HNewInstance* instruction) {
@@ -1071,6 +1114,7 @@ void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
// Nothing to do, the parameter is already at its location.
+ UNUSED(instruction);
}
void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
@@ -1082,6 +1126,7 @@ void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
}
void InstructionCodeGeneratorARM64::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -1126,6 +1171,7 @@ void LocationsBuilderARM64::VisitReturnVoid(HReturnVoid* instruction) {
}
void InstructionCodeGeneratorARM64::VisitReturnVoid(HReturnVoid* instruction) {
+ UNUSED(instruction);
codegen_->GenerateFrameExit();
__ Br(lr);
}
@@ -1153,6 +1199,7 @@ void LocationsBuilderARM64::VisitStoreLocal(HStoreLocal* store) {
}
void InstructionCodeGeneratorARM64::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderARM64::VisitSub(HSub* instruction) {
@@ -1204,6 +1251,7 @@ void LocationsBuilderARM64::VisitTemporary(HTemporary* temp) {
void InstructionCodeGeneratorARM64::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
} // namespace arm64
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index a4003ffea5..5530f46065 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -180,11 +180,15 @@ class CodeGeneratorARM64 : public CodeGenerator {
virtual Location GetStackLocation(HLoadLocal* load) const OVERRIDE;
virtual size_t SaveCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ UNUSED(stack_index);
+ UNUSED(reg_id);
UNIMPLEMENTED(INFO) << "TODO: SaveCoreRegister";
return 0;
}
virtual size_t RestoreCoreRegister(size_t stack_index, uint32_t reg_id) OVERRIDE {
+ UNUSED(stack_index);
+ UNUSED(reg_id);
UNIMPLEMENTED(INFO) << "TODO: RestoreCoreRegister";
return 0;
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 267edca2b0..1e37909be9 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -157,30 +157,83 @@ class SuspendCheckSlowPathX86 : public SlowPathCodeX86 {
DISALLOW_COPY_AND_ASSIGN(SuspendCheckSlowPathX86);
};
-class ClinitCheckSlowPathX86 : public SlowPathCodeX86 {
+class LoadStringSlowPathX86 : public SlowPathCodeX86 {
public:
- explicit ClinitCheckSlowPathX86(HClinitCheck* instruction) : instruction_(instruction) {}
+ explicit LoadStringSlowPathX86(HLoadString* instruction) : instruction_(instruction) {}
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
__ Bind(GetEntryLabel());
- codegen->SaveLiveRegisters(instruction_->GetLocations());
+ codegen->SaveLiveRegisters(locations);
- HLoadClass* cls = instruction_->GetLoadClass();
InvokeRuntimeCallingConvention calling_convention;
- __ movl(calling_convention.GetRegisterAt(0), Immediate(cls->GetTypeIndex()));
- x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
- __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage)));
+ x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(0));
+ __ movl(calling_convention.GetRegisterAt(1), Immediate(instruction_->GetStringIndex()));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pResolveString)));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
- x86_codegen->Move32(instruction_->GetLocations()->InAt(0), Location::RegisterLocation(EAX));
- codegen->RestoreLiveRegisters(instruction_->GetLocations());
+ x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ codegen->RestoreLiveRegisters(locations);
+
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ HLoadString* const instruction_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86);
+};
+
+class LoadClassSlowPathX86 : public SlowPathCodeX86 {
+ public:
+ LoadClassSlowPathX86(HLoadClass* cls,
+ HInstruction* at,
+ uint32_t dex_pc,
+ bool do_clinit)
+ : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ }
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = at_->GetLocations();
+ CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ __ movl(calling_convention.GetRegisterAt(0), Immediate(cls_->GetTypeIndex()));
+ x86_codegen->LoadCurrentMethod(calling_convention.GetRegisterAt(1));
+ __ fs()->call(Address::Absolute(do_clinit_
+ ? QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeStaticStorage)
+ : QUICK_ENTRYPOINT_OFFSET(kX86WordSize, pInitializeType)));
+ codegen->RecordPcInfo(at_, dex_pc_);
+
+ // Move the class to the desired location.
+ if (locations->Out().IsValid()) {
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ x86_codegen->Move32(locations->Out(), Location::RegisterLocation(EAX));
+ }
+ codegen->RestoreLiveRegisters(locations);
__ jmp(GetExitLabel());
}
private:
- HClinitCheck* const instruction_;
+ // The class this slow path will load.
+ HLoadClass* const cls_;
+
+ // The instruction where this slow path is happening.
+ // (Might be the load class or an initialization check).
+ HInstruction* const at_;
- DISALLOW_COPY_AND_ASSIGN(ClinitCheckSlowPathX86);
+ // The dex PC of `at_`.
+ const uint32_t dex_pc_;
+
+ // Whether to initialize the class.
+ const bool do_clinit_;
+
+ DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86);
};
#undef __
@@ -393,7 +446,9 @@ Location InvokeDexCallingConventionVisitor::GetNextLocation(Primitive::Type type
calling_convention.GetRegisterPairAt(index));
return Location::RegisterPairLocation(pair.AsRegisterPairLow(), pair.AsRegisterPairHigh());
} else if (index + 1 == calling_convention.GetNumberOfRegisters()) {
- return Location::QuickParameter(index);
+ // On X86, the register index and stack index of a quick parameter are the same, since
+ // we are passing floating point values in core registers.
+ return Location::QuickParameter(index, index);
} else {
return Location::DoubleStackSlot(calling_convention.GetStackOffsetOf(index));
}
@@ -453,12 +508,13 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else if (source.IsQuickParameter()) {
- uint32_t argument_index = source.GetQuickParameterIndex();
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
InvokeDexCallingConvention calling_convention;
__ movl(destination.AsRegisterPairLow<Register>(),
- calling_convention.GetRegisterAt(argument_index));
+ calling_convention.GetRegisterAt(register_index));
__ movl(destination.AsRegisterPairHigh<Register>(), Address(ESP,
- calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize()));
+ calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize()));
} else {
DCHECK(source.IsDoubleStackSlot());
__ movl(destination.AsRegisterPairLow<Register>(), Address(ESP, source.GetStackIndex()));
@@ -467,19 +523,20 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
}
} else if (destination.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = destination.GetQuickParameterIndex();
+ uint16_t register_index = destination.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = destination.GetQuickParameterStackIndex();
if (source.IsRegister()) {
- __ movl(calling_convention.GetRegisterAt(argument_index), source.AsRegisterPairLow<Register>());
- __ movl(Address(ESP, calling_convention.GetStackOffsetOf(argument_index + 1)),
+ __ movl(calling_convention.GetRegisterAt(register_index), source.AsRegisterPairLow<Register>());
+ __ movl(Address(ESP, calling_convention.GetStackOffsetOf(stack_index + 1)),
source.AsRegisterPairHigh<Register>());
} else if (source.IsFpuRegister()) {
LOG(FATAL) << "Unimplemented";
} else {
DCHECK(source.IsDoubleStackSlot());
- __ movl(calling_convention.GetRegisterAt(argument_index),
+ __ movl(calling_convention.GetRegisterAt(register_index),
Address(ESP, source.GetStackIndex()));
__ pushl(Address(ESP, source.GetHighStackIndex(kX86WordSize)));
- __ popl(Address(ESP, calling_convention.GetStackOffsetOf(argument_index + 1)));
+ __ popl(Address(ESP, calling_convention.GetStackOffsetOf(stack_index + 1)));
}
} else if (destination.IsFpuRegister()) {
if (source.IsDoubleStackSlot()) {
@@ -495,10 +552,11 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
source.AsRegisterPairHigh<Register>());
} else if (source.IsQuickParameter()) {
InvokeDexCallingConvention calling_convention;
- uint32_t argument_index = source.GetQuickParameterIndex();
+ uint16_t register_index = source.GetQuickParameterRegisterIndex();
+ uint16_t stack_index = source.GetQuickParameterStackIndex();
__ movl(Address(ESP, destination.GetStackIndex()),
- calling_convention.GetRegisterAt(argument_index));
- DCHECK_EQ(calling_convention.GetStackOffsetOf(argument_index + 1) + GetFrameSize(),
+ calling_convention.GetRegisterAt(register_index));
+ DCHECK_EQ(calling_convention.GetStackOffsetOf(stack_index + 1) + GetFrameSize(),
static_cast<size_t>(destination.GetHighStackIndex(kX86WordSize)));
} else if (source.IsFpuRegister()) {
__ movsd(Address(ESP, destination.GetStackIndex()), source.As<XmmRegister>());
@@ -611,6 +669,7 @@ void LocationsBuilderX86::VisitExit(HExit* exit) {
}
void InstructionCodeGeneratorX86::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
__ Comment("Unreachable");
__ int3();
@@ -700,6 +759,7 @@ void LocationsBuilderX86::VisitLoadLocal(HLoadLocal* local) {
void InstructionCodeGeneratorX86::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
@@ -728,6 +788,7 @@ void LocationsBuilderX86::VisitStoreLocal(HStoreLocal* store) {
}
void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderX86::VisitCondition(HCondition* comp) {
@@ -817,6 +878,7 @@ void LocationsBuilderX86::VisitIntConstant(HIntConstant* constant) {
void InstructionCodeGeneratorX86::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
@@ -827,6 +889,7 @@ void LocationsBuilderX86::VisitLongConstant(HLongConstant* constant) {
void InstructionCodeGeneratorX86::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
@@ -837,6 +900,7 @@ void LocationsBuilderX86::VisitFloatConstant(HFloatConstant* constant) {
void InstructionCodeGeneratorX86::VisitFloatConstant(HFloatConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -847,6 +911,7 @@ void LocationsBuilderX86::VisitDoubleConstant(HDoubleConstant* constant) {
void InstructionCodeGeneratorX86::VisitDoubleConstant(HDoubleConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
@@ -854,6 +919,7 @@ void LocationsBuilderX86::VisitReturnVoid(HReturnVoid* ret) {
}
void InstructionCodeGeneratorX86::VisitReturnVoid(HReturnVoid* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
__ ret();
}
@@ -1422,6 +1488,7 @@ void LocationsBuilderX86::VisitParameterValue(HParameterValue* instruction) {
}
void InstructionCodeGeneratorX86::VisitParameterValue(HParameterValue* instruction) {
+ UNUSED(instruction);
}
void LocationsBuilderX86::VisitNot(HNot* not_) {
@@ -1464,7 +1531,6 @@ void LocationsBuilderX86::VisitCompare(HCompare* compare) {
}
void InstructionCodeGeneratorX86::VisitCompare(HCompare* compare) {
- Label greater, done;
LocationSummary* locations = compare->GetLocations();
switch (compare->InputAt(0)->GetType()) {
case Primitive::kPrimLong: {
@@ -1516,6 +1582,7 @@ void LocationsBuilderX86::VisitPhi(HPhi* instruction) {
}
void InstructionCodeGeneratorX86::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -1992,9 +2059,11 @@ void LocationsBuilderX86::VisitTemporary(HTemporary* temp) {
void InstructionCodeGeneratorX86::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
void LocationsBuilderX86::VisitParallelMove(HParallelMove* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unreachable";
}
@@ -2135,20 +2204,37 @@ void ParallelMoveResolverX86::RestoreScratch(int reg) {
}
void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary::CallKind call_kind = cls->CanCallRuntime()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cls, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
Register out = cls->GetLocations()->Out().As<Register>();
if (cls->IsReferrersClass()) {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
codegen_->LoadCurrentMethod(out);
__ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
+ DCHECK(cls->CanCallRuntime());
codegen_->LoadCurrentMethod(out);
__ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
+
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
+ __ testl(out, out);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
+ }
}
}
@@ -2162,17 +2248,15 @@ void LocationsBuilderX86::VisitClinitCheck(HClinitCheck* check) {
}
void InstructionCodeGeneratorX86::VisitClinitCheck(HClinitCheck* check) {
- SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) ClinitCheckSlowPathX86(check);
+ // We assume the class is not null.
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
+ check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
+ GenerateClassInitializationCheck(slow_path, check->GetLocations()->InAt(0).As<Register>());
+}
- LocationSummary* locations = check->GetLocations();
- // We remove the class as a live register, we know it's null or unused in the slow path.
- RegisterSet* register_set = locations->GetLiveRegisters();
- register_set->Remove(locations->InAt(0));
-
- Register class_reg = locations->InAt(0).As<Register>();
- __ testl(class_reg, class_reg);
- __ j(kEqual, slow_path->GetEntryLabel());
+void InstructionCodeGeneratorX86::GenerateClassInitializationCheck(
+ SlowPathCodeX86* slow_path, Register class_reg) {
__ cmpl(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
Immediate(mirror::Class::kStatusInitialized));
__ j(kLess, slow_path->GetEntryLabel());
@@ -2316,5 +2400,24 @@ void InstructionCodeGeneratorX86::VisitStaticFieldSet(HStaticFieldSet* instructi
}
}
+void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
+ SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
+ codegen_->AddSlowPath(slow_path);
+
+ Register out = load->GetLocations()->Out().As<Register>();
+ codegen_->LoadCurrentMethod(out);
+ __ movl(out, Address(out, mirror::ArtMethod::DexCacheStringsOffset().Int32Value()));
+ __ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
+ __ testl(out, out);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
} // namespace x86
} // namespace art
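
VisitLoadString follows the same recipe as VisitLoadClass, just without the initialization check: load the entry from the method's dex-cache string table, and branch to LoadStringSlowPathX86 (which calls pResolveString) only when that slot is still null. A condensed sketch under assumed stand-in types:

#include <cstdint>
#include <string>

// Sketch only; String and ResolveStringFromDex are illustrative stand-ins.
using String = std::string;

static String* ResolveStringFromDex(uint32_t string_index) {
  // Stand-in for the pResolveString runtime call made by the slow path.
  return new String("string #" + std::to_string(string_index));
}

// Fast path: dex-cache hit (testl out, out). Slow path: resolve once, then cache.
String* LoadString(String* dex_cache_strings[], uint32_t string_index) {
  String* str = dex_cache_strings[string_index];
  if (str == nullptr) {
    str = ResolveStringFromDex(string_index);
    dex_cache_strings[string_index] = str;
  }
  return str;
}
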
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index bcceaad00f..176a269ac4 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -28,6 +28,7 @@ namespace x86 {
static constexpr size_t kX86WordSize = 4;
class CodeGeneratorX86;
+class SlowPathCodeX86;
static constexpr Register kParameterCoreRegisters[] = { ECX, EDX, EBX };
static constexpr RegisterPair kParameterCorePairRegisters[] = { ECX_EDX, EDX_EBX };
@@ -126,6 +127,7 @@ class InstructionCodeGeneratorX86 : public HGraphVisitor {
// is the block to branch to if the suspend check is not needed, and after
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* check, HBasicBlock* successor);
+ void GenerateClassInitializationCheck(SlowPathCodeX86* slow_path, Register class_reg);
X86Assembler* const assembler_;
CodeGeneratorX86* const codegen_;
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index e8d34e3888..40eec9b15d 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -168,32 +168,86 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64);
};
-class ClinitCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
+class LoadClassSlowPathX86_64 : public SlowPathCodeX86_64 {
public:
- explicit ClinitCheckSlowPathX86_64(HClinitCheck* instruction) : instruction_(instruction) {}
+ LoadClassSlowPathX86_64(HLoadClass* cls,
+ HInstruction* at,
+ uint32_t dex_pc,
+ bool do_clinit)
+ : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+ DCHECK(at->IsLoadClass() || at->IsClinitCheck());
+ }
virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = at_->GetLocations();
CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
__ Bind(GetEntryLabel());
- codegen->SaveLiveRegisters(instruction_->GetLocations());
- HLoadClass* cls = instruction_->GetLoadClass();
+ codegen->SaveLiveRegisters(locations);
+
InvokeRuntimeCallingConvention calling_convention;
- __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls->GetTypeIndex()));
+ __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(cls_->GetTypeIndex()));
x64_codegen->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(1)));
- __ gs()->call(Address::Absolute(
- QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage), true));
+ __ gs()->call(Address::Absolute((do_clinit_
+ ? QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeStaticStorage)
+ : QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pInitializeType)), true));
+ codegen->RecordPcInfo(at_, dex_pc_);
+
+ // Move the class to the desired location.
+ if (locations->Out().IsValid()) {
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+ x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ }
+
+ codegen->RestoreLiveRegisters(locations);
+ __ jmp(GetExitLabel());
+ }
+
+ private:
+ // The class this slow path will load.
+ HLoadClass* const cls_;
+
+ // The instruction where this slow path is happening.
+ // (Might be the load class or an initialization check).
+ HInstruction* const at_;
+
+ // The dex PC of `at_`.
+ const uint32_t dex_pc_;
+
+ // Whether to initialize the class.
+ const bool do_clinit_;
+ DISALLOW_COPY_AND_ASSIGN(LoadClassSlowPathX86_64);
+};
+
+class LoadStringSlowPathX86_64 : public SlowPathCodeX86_64 {
+ public:
+ explicit LoadStringSlowPathX86_64(HLoadString* instruction) : instruction_(instruction) {}
+
+ virtual void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
+ LocationSummary* locations = instruction_->GetLocations();
+ DCHECK(!locations->GetLiveRegisters()->ContainsCoreRegister(locations->Out().reg()));
+
+ CodeGeneratorX86_64* x64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
+ __ Bind(GetEntryLabel());
+ codegen->SaveLiveRegisters(locations);
+
+ InvokeRuntimeCallingConvention calling_convention;
+ x64_codegen->LoadCurrentMethod(CpuRegister(calling_convention.GetRegisterAt(0)));
+ __ movl(CpuRegister(calling_convention.GetRegisterAt(1)),
+ Immediate(instruction_->GetStringIndex()));
+ __ gs()->call(Address::Absolute(
+ QUICK_ENTRYPOINT_OFFSET(kX86_64WordSize, pResolveString), true));
codegen->RecordPcInfo(instruction_, instruction_->GetDexPc());
- x64_codegen->Move(instruction_->GetLocations()->InAt(0), Location::RegisterLocation(RAX));
- codegen->RestoreLiveRegisters(instruction_->GetLocations());
+ x64_codegen->Move(locations->Out(), Location::RegisterLocation(RAX));
+ codegen->RestoreLiveRegisters(locations);
__ jmp(GetExitLabel());
}
private:
- HClinitCheck* const instruction_;
+ HLoadString* const instruction_;
- DISALLOW_COPY_AND_ASSIGN(ClinitCheckSlowPathX86_64);
+ DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64);
};
#undef __
@@ -526,6 +580,7 @@ void LocationsBuilderX86_64::VisitExit(HExit* exit) {
}
void InstructionCodeGeneratorX86_64::VisitExit(HExit* exit) {
+ UNUSED(exit);
if (kIsDebugBuild) {
__ Comment("Unreachable");
__ int3();
@@ -614,6 +669,7 @@ void LocationsBuilderX86_64::VisitLoadLocal(HLoadLocal* local) {
void InstructionCodeGeneratorX86_64::VisitLoadLocal(HLoadLocal* load) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(load);
}
void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) {
@@ -641,6 +697,7 @@ void LocationsBuilderX86_64::VisitStoreLocal(HStoreLocal* store) {
}
void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store) {
+ UNUSED(store);
}
void LocationsBuilderX86_64::VisitCondition(HCondition* comp) {
@@ -763,6 +820,7 @@ void LocationsBuilderX86_64::VisitIntConstant(HIntConstant* constant) {
void InstructionCodeGeneratorX86_64::VisitIntConstant(HIntConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
@@ -773,6 +831,7 @@ void LocationsBuilderX86_64::VisitLongConstant(HLongConstant* constant) {
void InstructionCodeGeneratorX86_64::VisitLongConstant(HLongConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
@@ -783,6 +842,7 @@ void LocationsBuilderX86_64::VisitFloatConstant(HFloatConstant* constant) {
void InstructionCodeGeneratorX86_64::VisitFloatConstant(HFloatConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
@@ -793,6 +853,7 @@ void LocationsBuilderX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
void InstructionCodeGeneratorX86_64::VisitDoubleConstant(HDoubleConstant* constant) {
// Will be generated at use site.
+ UNUSED(constant);
}
void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
@@ -800,6 +861,7 @@ void LocationsBuilderX86_64::VisitReturnVoid(HReturnVoid* ret) {
}
void InstructionCodeGeneratorX86_64::VisitReturnVoid(HReturnVoid* ret) {
+ UNUSED(ret);
codegen_->GenerateFrameExit();
__ ret();
}
@@ -1351,6 +1413,7 @@ void LocationsBuilderX86_64::VisitParameterValue(HParameterValue* instruction) {
void InstructionCodeGeneratorX86_64::VisitParameterValue(HParameterValue* instruction) {
// Nothing to do, the parameter is already at its location.
+ UNUSED(instruction);
}
void LocationsBuilderX86_64::VisitNot(HNot* not_) {
@@ -1393,6 +1456,7 @@ void LocationsBuilderX86_64::VisitPhi(HPhi* instruction) {
}
void InstructionCodeGeneratorX86_64::VisitPhi(HPhi* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unimplemented";
}
@@ -1872,9 +1936,11 @@ void LocationsBuilderX86_64::VisitTemporary(HTemporary* temp) {
void InstructionCodeGeneratorX86_64::VisitTemporary(HTemporary* temp) {
// Nothing to do, this is driven by the code generator.
+ UNUSED(temp);
}
void LocationsBuilderX86_64::VisitParallelMove(HParallelMove* instruction) {
+ UNUSED(instruction);
LOG(FATAL) << "Unimplemented";
}
@@ -2109,21 +2175,46 @@ void ParallelMoveResolverX86_64::RestoreScratch(int reg) {
__ popq(CpuRegister(reg));
}
+void InstructionCodeGeneratorX86_64::GenerateClassInitializationCheck(
+ SlowPathCodeX86_64* slow_path, CpuRegister class_reg) {
+ __ cmpl(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
+ Immediate(mirror::Class::kStatusInitialized));
+ __ j(kLess, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+ // No need for memory fence, thanks to the X86_64 memory model.
+}
+
void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
+ LocationSummary::CallKind call_kind = cls->CanCallRuntime()
+ ? LocationSummary::kCallOnSlowPath
+ : LocationSummary::kNoCall;
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(cls, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
CpuRegister out = cls->GetLocations()->Out().As<CpuRegister>();
if (cls->IsReferrersClass()) {
+ DCHECK(!cls->CanCallRuntime());
+ DCHECK(!cls->MustGenerateClinitCheck());
codegen_->LoadCurrentMethod(out);
__ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
+ DCHECK(cls->CanCallRuntime());
codegen_->LoadCurrentMethod(out);
__ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
+ cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
+ codegen_->AddSlowPath(slow_path);
+ __ testl(out, out);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ if (cls->MustGenerateClinitCheck()) {
+ GenerateClassInitializationCheck(slow_path, out);
+ } else {
+ __ Bind(slow_path->GetExitLabel());
+ }
}
}
@@ -2137,22 +2228,11 @@ void LocationsBuilderX86_64::VisitClinitCheck(HClinitCheck* check) {
}
void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
- SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) ClinitCheckSlowPathX86_64(check);
+ // We assume the class is not null.
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
+ check->GetLoadClass(), check, check->GetDexPc(), true);
codegen_->AddSlowPath(slow_path);
-
- LocationSummary* locations = check->GetLocations();
- // We remove the class as a live register, we know it's null or unused in the slow path.
- RegisterSet* register_set = locations->GetLiveRegisters();
- register_set->Remove(locations->InAt(0));
-
- CpuRegister class_reg = locations->InAt(0).As<CpuRegister>();
- __ testl(class_reg, class_reg);
- __ j(kEqual, slow_path->GetEntryLabel());
- __ cmpl(Address(class_reg, mirror::Class::StatusOffset().Int32Value()),
- Immediate(mirror::Class::kStatusInitialized));
- __ j(kLess, slow_path->GetEntryLabel());
- __ Bind(slow_path->GetExitLabel());
- // No need for memory fence, thanks to the X86_64 memory model.
+ GenerateClassInitializationCheck(slow_path, check->GetLocations()->InAt(0).As<CpuRegister>());
}
void LocationsBuilderX86_64::VisitStaticFieldGet(HStaticFieldGet* instruction) {
@@ -2270,5 +2350,24 @@ void InstructionCodeGeneratorX86_64::VisitStaticFieldSet(HStaticFieldSet* instru
}
}
+void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetOut(Location::RequiresRegister());
+}
+
+void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
+ SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
+ codegen_->AddSlowPath(slow_path);
+
+ CpuRegister out = load->GetLocations()->Out().As<CpuRegister>();
+ codegen_->LoadCurrentMethod(CpuRegister(out));
+ __ movl(out, Address(out, mirror::ArtMethod::DexCacheStringsOffset().Int32Value()));
+ __ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
+ __ testl(out, out);
+ __ j(kEqual, slow_path->GetEntryLabel());
+ __ Bind(slow_path->GetExitLabel());
+}
+
} // namespace x86_64
} // namespace art
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index 32d2702d72..0de304538f 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -65,6 +65,7 @@ class InvokeDexCallingConventionVisitor {
};
class CodeGeneratorX86_64;
+class SlowPathCodeX86_64;
class ParallelMoveResolverX86_64 : public ParallelMoveResolver {
public:
@@ -130,6 +131,7 @@ class InstructionCodeGeneratorX86_64 : public HGraphVisitor {
// is the block to branch to if the suspend check is not needed, and after
// the suspend call.
void GenerateSuspendCheck(HSuspendCheck* instruction, HBasicBlock* successor);
+ void GenerateClassInitializationCheck(SlowPathCodeX86_64* slow_path, CpuRegister class_reg);
X86_64Assembler* const assembler_;
CodeGeneratorX86_64* const codegen_;
diff --git a/compiler/optimizing/codegen_test.cc b/compiler/optimizing/codegen_test.cc
index 03951e29dd..68fcb25036 100644
--- a/compiler/optimizing/codegen_test.cc
+++ b/compiler/optimizing/codegen_test.cc
@@ -373,9 +373,9 @@ TEST(CodegenTest, NonMaterializedCondition) {
PrepareForRegisterAllocation(graph).Run();
ASSERT_FALSE(equal->NeedsMaterialization());
- auto hook_before_codegen = [](HGraph* graph) {
- HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0);
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ auto hook_before_codegen = [](HGraph* graph_in) {
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors().Get(0);
+ HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
@@ -408,11 +408,7 @@ MUL_TEST(INT, MulInt);
MUL_TEST(LONG, MulLong);
#endif
-#if defined(__aarch64__)
-TEST(CodegenTest, DISABLED_ReturnMulIntLit8) {
-#else
TEST(CodegenTest, ReturnMulIntLit8) {
-#endif
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT8, 3 << 8 | 0,
@@ -421,11 +417,7 @@ TEST(CodegenTest, ReturnMulIntLit8) {
TestCode(data, true, 12);
}
-#if defined(__aarch64__)
-TEST(CodegenTest, DISABLED_ReturnMulIntLit16) {
-#else
TEST(CodegenTest, ReturnMulIntLit16) {
-#endif
const uint16_t data[] = ONE_REGISTER_CODE_ITEM(
Instruction::CONST_4 | 4 << 12 | 0 << 8,
Instruction::MUL_INT_LIT16, 3,
@@ -471,9 +463,9 @@ TEST(CodegenTest, MaterializedCondition1) {
HReturn ret(&cmp_lt);
code_block->AddInstruction(&ret);
- auto hook_before_codegen = [](HGraph* graph) {
- HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0);
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ auto hook_before_codegen = [](HGraph* graph_in) {
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors().Get(0);
+ HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
@@ -541,9 +533,9 @@ TEST(CodegenTest, MaterializedCondition2) {
HReturn ret_ge(&cst_ge);
if_false_block->AddInstruction(&ret_ge);
- auto hook_before_codegen = [](HGraph* graph) {
- HBasicBlock* block = graph->GetEntryBlock()->GetSuccessors().Get(0);
- HParallelMove* move = new (graph->GetArena()) HParallelMove(graph->GetArena());
+ auto hook_before_codegen = [](HGraph* graph_in) {
+ HBasicBlock* block = graph_in->GetEntryBlock()->GetSuccessors().Get(0);
+ HParallelMove* move = new (graph_in->GetArena()) HParallelMove(graph_in->GetArena());
block->InsertInstructionBefore(move, block->GetLastInstruction());
};
diff --git a/compiler/optimizing/constant_folding.cc b/compiler/optimizing/constant_folding.cc
index 10a7e46299..fca9933872 100644
--- a/compiler/optimizing/constant_folding.cc
+++ b/compiler/optimizing/constant_folding.cc
@@ -28,9 +28,9 @@ void HConstantFolding::Run() {
// Traverse this block's instructions in (forward) order and
// replace the ones that can be statically evaluated by a
// compile-time counterpart.
- for (HInstructionIterator it(block->GetInstructions());
- !it.Done(); it.Advance()) {
- HInstruction* inst = it.Current();
+ for (HInstructionIterator inst_it(block->GetInstructions());
+ !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* inst = inst_it.Current();
if (inst->IsBinaryOperation()) {
// Constant folding: replace `op(a, b)' with a constant at
// compile time if `a' and `b' are both constants.
diff --git a/compiler/optimizing/gvn.cc b/compiler/optimizing/gvn.cc
index 027b3d4ff3..25168b5b0c 100644
--- a/compiler/optimizing/gvn.cc
+++ b/compiler/optimizing/gvn.cc
@@ -54,8 +54,9 @@ void GlobalValueNumberer::ComputeSideEffects() {
SideEffects effects = SideEffects::None();
// Update `effects` with the side effects of all instructions in this block.
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* instruction = it.Current();
+ for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
+ inst_it.Advance()) {
+ HInstruction* instruction = inst_it.Current();
effects = effects.Union(instruction->GetSideEffects());
if (effects.HasAllSideEffects()) {
break;
diff --git a/compiler/optimizing/gvn.h b/compiler/optimizing/gvn.h
index a98d714476..8d2c77475c 100644
--- a/compiler/optimizing/gvn.h
+++ b/compiler/optimizing/gvn.h
@@ -25,7 +25,7 @@ namespace art {
* A node in the collision list of a ValueSet. Encodes the instruction,
* the hash code, and the next node in the collision list.
*/
-class ValueSetNode : public ArenaObject {
+class ValueSetNode : public ArenaObject<kArenaAllocMisc> {
public:
ValueSetNode(HInstruction* instruction, size_t hash_code, ValueSetNode* next)
: instruction_(instruction), hash_code_(hash_code), next_(next) {}
@@ -52,7 +52,7 @@ class ValueSetNode : public ArenaObject {
* if there is one in the set. In GVN, we would say those instructions have the
* same "number".
*/
-class ValueSet : public ArenaObject {
+class ValueSet : public ArenaObject<kArenaAllocMisc> {
public:
explicit ValueSet(ArenaAllocator* allocator)
: allocator_(allocator), number_of_entries_(0), collisions_(nullptr) {
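
The ArenaObject base class used by ValueSetNode and ValueSet (and by the node classes further down) is now a template parameterized on an allocation kind, so arena allocations can be attributed per kind. The following is a simplified, self-contained sketch of that pattern, assuming an Alloc(size, kind) interface on the allocator; it is not the real ART ArenaAllocator.

    #include <cstddef>
    #include <cstdlib>

    // Sketch of a kind-tagged arena base class; std::malloc stands in for the
    // arena bump allocation, and only one kind is shown.
    enum ArenaAllocKind { kArenaAllocMisc };

    class ArenaAllocator {
     public:
      void* Alloc(size_t bytes, ArenaAllocKind /*kind*/) {
        // The real allocator bumps a pointer into an arena and records
        // per-kind statistics; malloc keeps the sketch self-contained.
        return std::malloc(bytes);
      }
    };

    template <ArenaAllocKind kAllocKind>
    class ArenaObject {
     public:
      static void* operator new(size_t size, ArenaAllocator* allocator) {
        return allocator->Alloc(size, kAllocKind);
      }
      static void operator delete(void*, size_t) {}  // Arena memory is reclaimed in bulk.
    };

    // Usage mirrors the hunks above: class ValueSet : public ArenaObject<kArenaAllocMisc> { ... };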
diff --git a/compiler/optimizing/locations.h b/compiler/optimizing/locations.h
index 94aded6c87..bed688b5e3 100644
--- a/compiler/optimizing/locations.h
+++ b/compiler/optimizing/locations.h
@@ -27,6 +27,9 @@ namespace art {
class HConstant;
class HInstruction;
+class Location;
+
+std::ostream& operator<<(std::ostream& os, const Location& location);
/**
* A Location is an abstraction over the potential location
@@ -71,16 +74,16 @@ class Location : public ValueObject {
Location() : value_(kInvalid) {
// Verify that non-constant location kinds do not interfere with kConstant.
- COMPILE_ASSERT((kInvalid & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kUnallocated & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kStackSlot & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kDoubleStackSlot & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kRegister & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kQuickParameter & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kFpuRegister & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kRegisterPair & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kFpuRegisterPair & kLocationConstantMask) != kConstant, TagError);
- COMPILE_ASSERT((kConstant & kLocationConstantMask) == kConstant, TagError);
+ static_assert((kInvalid & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kUnallocated & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kStackSlot & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kDoubleStackSlot & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kRegister & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kQuickParameter & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kFpuRegister & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kRegisterPair & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kFpuRegisterPair & kLocationConstantMask) != kConstant, "TagError");
+ static_assert((kConstant & kLocationConstantMask) == kConstant, "TagError");
DCHECK(!IsValid());
}
@@ -228,13 +231,18 @@ class Location : public ValueObject {
return GetPayload() - kStackIndexBias + word_size;
}
- static Location QuickParameter(uint32_t parameter_index) {
- return Location(kQuickParameter, parameter_index);
+ static Location QuickParameter(uint16_t register_index, uint16_t stack_index) {
+ return Location(kQuickParameter, register_index << 16 | stack_index);
}
- uint32_t GetQuickParameterIndex() const {
+ uint32_t GetQuickParameterRegisterIndex() const {
DCHECK(IsQuickParameter());
- return GetPayload();
+ return GetPayload() >> 16;
+ }
+
+ uint32_t GetQuickParameterStackIndex() const {
+ DCHECK(IsQuickParameter());
+ return GetPayload() & 0xFFFF;
}
bool IsQuickParameter() const {
@@ -346,6 +354,8 @@ class Location : public ValueObject {
// way that none of them can be interpreted as a kConstant tag.
uintptr_t value_;
};
+std::ostream& operator<<(std::ostream& os, const Location::Kind& rhs);
+std::ostream& operator<<(std::ostream& os, const Location::Policy& rhs);
class RegisterSet : public ValueObject {
public:
@@ -364,7 +374,7 @@ class RegisterSet : public ValueObject {
if (loc.IsRegister()) {
core_registers_ &= ~(1 << loc.reg());
} else {
- DCHECK(loc.IsFpuRegister());
+ DCHECK(loc.IsFpuRegister()) << loc;
floating_point_registers_ &= ~(1 << loc.reg());
}
}
@@ -396,7 +406,7 @@ class RegisterSet : public ValueObject {
* The intent is to have the code for generating the instruction independent of
* register allocation. A register allocator just has to provide a LocationSummary.
*/
-class LocationSummary : public ArenaObject {
+class LocationSummary : public ArenaObject<kArenaAllocMisc> {
public:
enum CallKind {
kNoCall,
@@ -521,8 +531,6 @@ class LocationSummary : public ArenaObject {
DISALLOW_COPY_AND_ASSIGN(LocationSummary);
};
-std::ostream& operator<<(std::ostream& os, const Location& location);
-
} // namespace art
#endif // ART_COMPILER_OPTIMIZING_LOCATIONS_H_
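
QuickParameter locations now carry two indices in one payload: the register index in the upper 16 bits and the stack index in the lower 16 bits, matching the accessors above. A standalone round-trip check of that encoding:

    #include <cassert>
    #include <cstdint>

    // Encode and decode exactly as Location::QuickParameter and its getters do above.
    uint32_t EncodeQuickParameter(uint16_t register_index, uint16_t stack_index) {
      return (static_cast<uint32_t>(register_index) << 16) | stack_index;
    }
    uint32_t QuickParameterRegisterIndex(uint32_t payload) { return payload >> 16; }
    uint32_t QuickParameterStackIndex(uint32_t payload) { return payload & 0xFFFF; }

    int main() {
      uint32_t payload = EncodeQuickParameter(/*register_index=*/2, /*stack_index=*/5);
      assert(QuickParameterRegisterIndex(payload) == 2u);
      assert(QuickParameterStackIndex(payload) == 5u);
      return 0;
    }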
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index d624ad5e5e..8cb2ef6de8 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -647,4 +647,16 @@ bool HInstruction::Equals(HInstruction* other) const {
return true;
}
+std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs) {
+#define DECLARE_CASE(type, super) case HInstruction::k##type: os << #type; break;
+ switch (rhs) {
+ FOR_EACH_INSTRUCTION(DECLARE_CASE)
+ default:
+ os << "Unknown instruction kind " << static_cast<int>(rhs);
+ break;
+ }
+#undef DECLARE_CASE
+ return os;
+}
+
} // namespace art
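
The new operator<< relies on the FOR_EACH_INSTRUCTION X-macro to stamp out one case per instruction kind. A reduced, compilable illustration of the same pattern with two dummy kinds; FOR_EACH_KIND here stands in for ART's FOR_EACH_INSTRUCTION.

    #include <iostream>

    #define FOR_EACH_KIND(M) \
      M(Add, BinaryOperation) \
      M(LoadString, Instruction)

    enum InstructionKind {
    #define DECLARE_KIND(type, super) k##type,
      FOR_EACH_KIND(DECLARE_KIND)
    #undef DECLARE_KIND
    };

    std::ostream& operator<<(std::ostream& os, InstructionKind rhs) {
    #define DECLARE_CASE(type, super) case k##type: os << #type; break;
      switch (rhs) {
        FOR_EACH_KIND(DECLARE_CASE)
        default: os << "Unknown instruction kind " << static_cast<int>(rhs); break;
      }
    #undef DECLARE_CASE
      return os;
    }

    int main() { std::cout << kLoadString << std::endl; }  // Prints "LoadString".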
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 86c36b8313..79638b3545 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -79,12 +79,14 @@ class HInstructionList {
};
// Control-flow graph of a method. Contains a list of basic blocks.
-class HGraph : public ArenaObject {
+class HGraph : public ArenaObject<kArenaAllocMisc> {
public:
explicit HGraph(ArenaAllocator* arena)
: arena_(arena),
blocks_(arena, kDefaultNumberOfBlocks),
reverse_post_order_(arena, kDefaultNumberOfBlocks),
+ entry_block_(nullptr),
+ exit_block_(nullptr),
maximum_number_of_out_vregs_(0),
number_of_vregs_(0),
number_of_in_vregs_(0),
@@ -199,7 +201,7 @@ class HGraph : public ArenaObject {
DISALLOW_COPY_AND_ASSIGN(HGraph);
};
-class HLoopInformation : public ArenaObject {
+class HLoopInformation : public ArenaObject<kArenaAllocMisc> {
public:
HLoopInformation(HBasicBlock* header, HGraph* graph)
: header_(header),
@@ -278,7 +280,7 @@ static constexpr uint32_t kNoDexPc = -1;
// as a double linked list. Each block knows its predecessors and
// successors.
-class HBasicBlock : public ArenaObject {
+class HBasicBlock : public ArenaObject<kArenaAllocMisc> {
public:
explicit HBasicBlock(HGraph* graph, uint32_t dex_pc = kNoDexPc)
: graph_(graph),
@@ -489,10 +491,11 @@ class HBasicBlock : public ArenaObject {
M(IntConstant, Constant) \
M(InvokeStatic, Invoke) \
M(InvokeVirtual, Invoke) \
- M(LoadClass, Instruction) \
M(LessThan, Condition) \
M(LessThanOrEqual, Condition) \
+ M(LoadClass, Instruction) \
M(LoadLocal, Instruction) \
+ M(LoadString, Instruction) \
M(Local, Instruction) \
M(LongConstant, Constant) \
M(Mul, BinaryOperation) \
@@ -536,7 +539,7 @@ FOR_EACH_INSTRUCTION(FORWARD_DECLARATION)
virtual void Accept(HGraphVisitor* visitor)
template <typename T>
-class HUseListNode : public ArenaObject {
+class HUseListNode : public ArenaObject<kArenaAllocMisc> {
public:
HUseListNode(T* user, size_t index, HUseListNode* tail)
: user_(user), index_(index), tail_(tail) {}
@@ -618,7 +621,7 @@ class SideEffects : public ValueObject {
size_t flags_;
};
-class HInstruction : public ArenaObject {
+class HInstruction : public ArenaObject<kArenaAllocMisc> {
public:
explicit HInstruction(SideEffects side_effects)
: previous_(nullptr),
@@ -737,12 +740,18 @@ class HInstruction : public ArenaObject {
virtual bool CanBeMoved() const { return false; }
// Returns whether the two instructions are of the same kind.
- virtual bool InstructionTypeEquals(HInstruction* other) const { return false; }
+ virtual bool InstructionTypeEquals(HInstruction* other) const {
+ UNUSED(other);
+ return false;
+ }
// Returns whether any data encoded in the two instructions is equal.
// This method does not look at the inputs. Both instructions must be
// of the same type, otherwise the method has undefined behavior.
- virtual bool InstructionDataEquals(HInstruction* other) const { return false; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return false;
+ }
// Returns whether two instructions are equal, that is:
// 1) They have the same type and contain the same data,
@@ -807,6 +816,7 @@ class HInstruction : public ArenaObject {
DISALLOW_COPY_AND_ASSIGN(HInstruction);
};
+std::ostream& operator<<(std::ostream& os, const HInstruction::InstructionKind& rhs);
template<typename T>
class HUseIterator : public ValueObject {
@@ -832,7 +842,7 @@ class HUseIterator : public ValueObject {
};
// A HEnvironment object contains the values of virtual registers at a given location.
-class HEnvironment : public ArenaObject {
+class HEnvironment : public ArenaObject<kArenaAllocMisc> {
public:
HEnvironment(ArenaAllocator* arena, size_t number_of_vregs) : vregs_(arena, number_of_vregs) {
vregs_.SetSize(number_of_vregs);
@@ -964,14 +974,14 @@ class EmbeddedArray<T, 0> {
public:
intptr_t length() const { return 0; }
const T& operator[](intptr_t i) const {
+ UNUSED(i);
LOG(FATAL) << "Unreachable";
- static T sentinel = 0;
- return sentinel;
+ UNREACHABLE();
}
T& operator[](intptr_t i) {
+ UNUSED(i);
LOG(FATAL) << "Unreachable";
- static T sentinel = 0;
- return sentinel;
+ UNREACHABLE();
}
};
@@ -1109,7 +1119,10 @@ class HUnaryOperation : public HExpression<1> {
Primitive::Type GetResultType() const { return GetType(); }
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
@@ -1142,7 +1155,10 @@ class HBinaryOperation : public HExpression<2> {
virtual bool IsCommutative() { return false; }
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
// Try to statically evaluate `operation` and return a HConstant
// containing the result of this evaluation. If `operation` cannot
@@ -1731,7 +1747,10 @@ class HNot : public HUnaryOperation {
: HUnaryOperation(result_type, input) {}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
virtual int32_t Evaluate(int32_t x) const OVERRIDE { return ~x; }
virtual int64_t Evaluate(int64_t x) const OVERRIDE { return ~x; }
@@ -1791,7 +1810,10 @@ class HNullCheck : public HExpression<1> {
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
virtual bool NeedsEnvironment() const { return true; }
@@ -1883,7 +1905,10 @@ class HArrayGet : public HExpression<2> {
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
void SetType(Primitive::Type type) { type_ = type; }
DECLARE_INSTRUCTION(ArrayGet);
@@ -1947,7 +1972,10 @@ class HArrayLength : public HExpression<1> {
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
DECLARE_INSTRUCTION(ArrayLength);
@@ -1965,7 +1993,10 @@ class HBoundsCheck : public HExpression<2> {
}
virtual bool CanBeMoved() const { return true; }
- virtual bool InstructionDataEquals(HInstruction* other) const { return true; }
+ virtual bool InstructionDataEquals(HInstruction* other) const {
+ UNUSED(other);
+ return true;
+ }
virtual bool NeedsEnvironment() const { return true; }
@@ -2021,8 +2052,6 @@ class HSuspendCheck : public HTemplateInstruction<0> {
DISALLOW_COPY_AND_ASSIGN(HSuspendCheck);
};
-// TODO: Make this class handle the case the load is null (dex cache
-// is null).
/**
* Instruction to load a Class object.
*/
@@ -2030,13 +2059,14 @@ class HLoadClass : public HExpression<0> {
public:
HLoadClass(uint16_t type_index,
bool is_referrers_class,
- bool is_initialized,
uint32_t dex_pc)
: HExpression(Primitive::kPrimNot, SideEffects::None()),
type_index_(type_index),
is_referrers_class_(is_referrers_class),
- is_initialized_(is_initialized),
- dex_pc_(dex_pc) {}
+ dex_pc_(dex_pc),
+ generate_clinit_check_(false) {}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
return other->AsLoadClass()->type_index_ == type_index_;
@@ -2046,24 +2076,69 @@ class HLoadClass : public HExpression<0> {
uint32_t GetDexPc() const { return dex_pc_; }
uint16_t GetTypeIndex() const { return type_index_; }
+ bool IsReferrersClass() const { return is_referrers_class_; }
- bool NeedsInitialization() const {
- return !is_initialized_ && !is_referrers_class_;
+ bool NeedsEnvironment() const OVERRIDE {
+ // Will call runtime and load the class if the class is not loaded yet.
+ // TODO: finer grain decision.
+ return !is_referrers_class_;
}
- bool IsReferrersClass() const { return is_referrers_class_; }
+ bool MustGenerateClinitCheck() const {
+ return generate_clinit_check_;
+ }
+
+ void SetMustGenerateClinitCheck() {
+ generate_clinit_check_ = true;
+ }
+
+ bool CanCallRuntime() const {
+ return MustGenerateClinitCheck() || !is_referrers_class_;
+ }
DECLARE_INSTRUCTION(LoadClass);
private:
const uint16_t type_index_;
const bool is_referrers_class_;
- const bool is_initialized_;
const uint32_t dex_pc_;
+ // Whether this instruction must generate the initialization check.
+ // Used for code generation.
+ bool generate_clinit_check_;
DISALLOW_COPY_AND_ASSIGN(HLoadClass);
};
+class HLoadString : public HExpression<0> {
+ public:
+ HLoadString(uint32_t string_index, uint32_t dex_pc)
+ : HExpression(Primitive::kPrimNot, SideEffects::None()),
+ string_index_(string_index),
+ dex_pc_(dex_pc) {}
+
+ bool CanBeMoved() const OVERRIDE { return true; }
+
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ return other->AsLoadString()->string_index_ == string_index_;
+ }
+
+ size_t ComputeHashCode() const OVERRIDE { return string_index_; }
+
+ uint32_t GetDexPc() const { return dex_pc_; }
+ uint32_t GetStringIndex() const { return string_index_; }
+
+ // TODO: Can we deopt or debug when we resolve a string?
+ bool NeedsEnvironment() const OVERRIDE { return false; }
+
+ DECLARE_INSTRUCTION(LoadString);
+
+ private:
+ const uint32_t string_index_;
+ const uint32_t dex_pc_;
+
+ DISALLOW_COPY_AND_ASSIGN(HLoadString);
+};
+
// TODO: Pass this check to HInvokeStatic nodes.
/**
* Performs an initialization check on its Class object input.
@@ -2076,6 +2151,12 @@ class HClinitCheck : public HExpression<1> {
SetRawInputAt(0, constant);
}
+ bool CanBeMoved() const OVERRIDE { return true; }
+ bool InstructionDataEquals(HInstruction* other) const OVERRIDE {
+ UNUSED(other);
+ return true;
+ }
+
bool NeedsEnvironment() const OVERRIDE {
// May call runtime to initialize the class.
return true;
@@ -2147,7 +2228,7 @@ class HStaticFieldSet : public HTemplateInstruction<2> {
DISALLOW_COPY_AND_ASSIGN(HStaticFieldSet);
};
-class MoveOperands : public ArenaObject {
+class MoveOperands : public ArenaObject<kArenaAllocMisc> {
public:
MoveOperands(Location source, Location destination, HInstruction* instruction)
: source_(source), destination_(destination), instruction_(instruction) {}
@@ -2248,7 +2329,7 @@ class HGraphVisitor : public ValueObject {
explicit HGraphVisitor(HGraph* graph) : graph_(graph) {}
virtual ~HGraphVisitor() {}
- virtual void VisitInstruction(HInstruction* instruction) {}
+ virtual void VisitInstruction(HInstruction* instruction) { UNUSED(instruction); }
virtual void VisitBasicBlock(HBasicBlock* block);
// Visit the graph following basic block insertion order.
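
HLoadClass and the new HLoadString override CanBeMoved(), InstructionDataEquals() and ComputeHashCode() so that passes like GVN can treat two loads of the same type or string index as congruent and keep only one. A cut-down model of the congruence test those hooks feed into; HInstruction::Equals also compares kinds and inputs, which these input-less nodes reduce to the data comparison shown. This is a sketch, not the ART class.

    #include <cstddef>
    #include <cstdint>

    // Stand-in for the data carried by HLoadString: two nodes with the same
    // string index hash alike and compare equal, so GVN can deduplicate them.
    struct LoadStringModel {
      uint32_t string_index;
      bool InstructionDataEquals(const LoadStringModel& other) const {
        return other.string_index == string_index;
      }
      size_t ComputeHashCode() const { return string_index; }
    };

    bool Congruent(const LoadStringModel& a, const LoadStringModel& b) {
      // Same kind (both LoadString), same data, and no inputs to compare.
      return a.InstructionDataEquals(b);
    }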
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index 5350dcb7b6..08b74c7988 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -213,6 +213,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
uint32_t method_idx,
jobject class_loader,
const DexFile& dex_file) const {
+ UNUSED(invoke_type);
total_compiled_methods_++;
InstructionSet instruction_set = GetCompilerDriver()->GetInstructionSet();
// Always use the thumb2 assembler: some runtime functionality (like implicit stack
@@ -226,6 +227,10 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
return nullptr;
}
+ if (Compiler::IsPathologicalCase(*code_item, method_idx, dex_file)) {
+ return nullptr;
+ }
+
DexCompilationUnit dex_compilation_unit(
nullptr, class_loader, art::Runtime::Current()->GetClassLinker(), dex_file, code_item,
class_def_idx, method_idx, access_flags,
diff --git a/compiler/optimizing/optimizing_unit_test.h b/compiler/optimizing/optimizing_unit_test.h
index 5b693dde07..aae7f9b95e 100644
--- a/compiler/optimizing/optimizing_unit_test.h
+++ b/compiler/optimizing/optimizing_unit_test.h
@@ -46,7 +46,7 @@ LiveInterval* BuildInterval(const size_t ranges[][2],
size_t number_of_ranges,
ArenaAllocator* allocator,
int reg = -1) {
- LiveInterval* interval = new (allocator) LiveInterval(allocator, Primitive::kPrimInt);
+ LiveInterval* interval = LiveInterval::MakeInterval(allocator, Primitive::kPrimInt);
for (size_t i = number_of_ranges; i > 0; --i) {
interval->AddRange(ranges[i - 1][0], ranges[i - 1][1]);
}
diff --git a/compiler/optimizing/parallel_move_resolver.cc b/compiler/optimizing/parallel_move_resolver.cc
index c71d93ebe5..1e93ece2ef 100644
--- a/compiler/optimizing/parallel_move_resolver.cc
+++ b/compiler/optimizing/parallel_move_resolver.cc
@@ -130,13 +130,13 @@ void ParallelMoveResolver::PerformMove(size_t index) {
// this move's source or destination needs to have their source
// changed to reflect the state of affairs after the swap.
Location source = move->GetSource();
- Location destination = move->GetDestination();
+ Location swap_destination = move->GetDestination();
move->Eliminate();
for (size_t i = 0; i < moves_.Size(); ++i) {
const MoveOperands& other_move = *moves_.Get(i);
if (other_move.Blocks(source)) {
- moves_.Get(i)->SetSource(destination);
- } else if (other_move.Blocks(destination)) {
+ moves_.Get(i)->SetSource(swap_destination);
+ } else if (other_move.Blocks(swap_destination)) {
moves_.Get(i)->SetSource(source);
}
}
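
The rename from `destination` to `swap_destination` makes the re-sourcing step after a cycle-breaking swap easier to follow: once the contents of `source` and `swap_destination` have been exchanged, any pending move that read from one of them must now read from the other. A simplified, self-contained version of that fix-up, using plain integers in place of Location:

    #include <vector>

    struct PendingMove { int source; int destination; };

    // After emitting a swap of the contents of `source` and `swap_destination`,
    // redirect pending moves so they still read the value they intended to.
    void FixupAfterSwap(std::vector<PendingMove>* moves, int source, int swap_destination) {
      for (PendingMove& other : *moves) {
        if (other.source == source) {
          other.source = swap_destination;  // That value now lives in swap_destination.
        } else if (other.source == swap_destination) {
          other.source = source;            // And the other value now lives in source.
        }
      }
    }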
diff --git a/compiler/optimizing/parallel_move_test.cc b/compiler/optimizing/parallel_move_test.cc
index 2bdcc61b04..62629bcd0c 100644
--- a/compiler/optimizing/parallel_move_test.cc
+++ b/compiler/optimizing/parallel_move_test.cc
@@ -50,8 +50,8 @@ class TestParallelMoveResolver : public ParallelMoveResolver {
<< ")";
}
- virtual void SpillScratch(int reg) {}
- virtual void RestoreScratch(int reg) {}
+ virtual void SpillScratch(int reg ATTRIBUTE_UNUSED) {}
+ virtual void RestoreScratch(int reg ATTRIBUTE_UNUSED) {}
std::string GetMessage() const {
return message_.str();
diff --git a/compiler/optimizing/prepare_for_register_allocation.cc b/compiler/optimizing/prepare_for_register_allocation.cc
index 2387141a39..c4db840f33 100644
--- a/compiler/optimizing/prepare_for_register_allocation.cc
+++ b/compiler/optimizing/prepare_for_register_allocation.cc
@@ -23,8 +23,9 @@ void PrepareForRegisterAllocation::Run() {
for (HReversePostOrderIterator it(*GetGraph()); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
// No need to visit the phis.
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- it.Current()->Accept(this);
+ for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
+ inst_it.Advance()) {
+ inst_it.Current()->Accept(this);
}
}
}
@@ -38,7 +39,14 @@ void PrepareForRegisterAllocation::VisitBoundsCheck(HBoundsCheck* check) {
}
void PrepareForRegisterAllocation::VisitClinitCheck(HClinitCheck* check) {
- check->ReplaceWith(check->InputAt(0));
+ HLoadClass* cls = check->GetLoadClass();
+ check->ReplaceWith(cls);
+ if (check->GetPrevious() == cls) {
+ // Pass the initialization duty to the `HLoadClass` instruction,
+ // and remove the instruction from the graph.
+ cls->SetMustGenerateClinitCheck();
+ check->GetBlock()->RemoveInstruction(check);
+ }
}
void PrepareForRegisterAllocation::VisitCondition(HCondition* condition) {
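
VisitClinitCheck now merges the check into the preceding HLoadClass whenever the two are adjacent, instead of only replacing the check's uses. Schematically (value names are illustrative), the IR goes from:

    l1: LoadClass type_idx
    l2: ClinitCheck(l1)
    l3: StaticFieldGet(l2, ...)

to:

    l1: LoadClass type_idx        // MustGenerateClinitCheck() == true
    l3: StaticFieldGet(l1, ...)

so the code generators above emit the initialization test (GenerateClassInitializationCheck) as part of the class load itself.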
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index f95c4a47e3..2a9c88506d 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -126,11 +126,12 @@ void RegisterAllocator::AllocateRegistersInternal() {
// is the one with the lowest start position.
for (HLinearPostOrderIterator it(liveness_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- ProcessInstruction(it.Current());
+ for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done();
+ back_it.Advance()) {
+ ProcessInstruction(back_it.Current());
}
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- ProcessInstruction(it.Current());
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ ProcessInstruction(inst_it.Current());
}
}
@@ -141,6 +142,10 @@ void RegisterAllocator::AllocateRegistersInternal() {
for (size_t i = 0, e = physical_core_register_intervals_.Size(); i < e; ++i) {
LiveInterval* fixed = physical_core_register_intervals_.Get(i);
if (fixed != nullptr) {
+ // Fixed interval is added to inactive_ instead of unhandled_.
+ // It's also the only type of inactive interval whose start position
+ // can be after the current interval during linear scan.
+ // Fixed interval is never split and never moves to unhandled_.
inactive_.Add(fixed);
}
}
@@ -160,6 +165,10 @@ void RegisterAllocator::AllocateRegistersInternal() {
for (size_t i = 0, e = physical_fp_register_intervals_.Size(); i < e; ++i) {
LiveInterval* fixed = physical_fp_register_intervals_.Get(i);
if (fixed != nullptr) {
+ // Fixed interval is added to inactive_ instead of unhandled_.
+ // It's also the only type of inactive interval whose start position
+ // can be after the current interval during linear scan.
+ // Fixed interval is never split and never moves to unhandled_.
inactive_.Add(fixed);
}
}
@@ -253,9 +262,6 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
current->SetFrom(position + 1);
current->SetRegister(output.reg());
BlockRegister(output, position, position + 1);
- } else if (!locations->OutputOverlapsWithInputs()) {
- // Shift the interval's start by one to not interfere with the inputs.
- current->SetFrom(position + 1);
} else if (output.IsStackSlot() || output.IsDoubleStackSlot()) {
current->SetSpillSlot(output.GetStackIndex());
}
@@ -266,15 +272,17 @@ void RegisterAllocator::ProcessInstruction(HInstruction* instruction) {
size_t first_register_use = current->FirstRegisterUse();
if (first_register_use != kNoLifetime) {
LiveInterval* split = Split(current, first_register_use - 1);
- // Don't add direclty to `unhandled`, it needs to be sorted and the start
+ // Don't add directly to `unhandled`, it needs to be sorted and the start
// of this new interval might be after intervals already in the list.
AddSorted(&unhandled, split);
} else {
// Nothing to do, we won't allocate a register for this value.
}
} else {
- DCHECK(unhandled.IsEmpty() || current->StartsBeforeOrAt(unhandled.Peek()));
- unhandled.Add(current);
+ // Don't add directly to `unhandled`, temp or safepoint intervals
+ // for this instruction may have been added, and those can be
+ // processed first.
+ AddSorted(&unhandled, current);
}
}
@@ -432,6 +440,27 @@ void RegisterAllocator::DumpInterval(std::ostream& stream, LiveInterval* interva
stream << std::endl;
}
+void RegisterAllocator::DumpAllIntervals(std::ostream& stream) const {
+ stream << "inactive: " << std::endl;
+ for (size_t i = 0; i < inactive_.Size(); i ++) {
+ DumpInterval(stream, inactive_.Get(i));
+ }
+ stream << "active: " << std::endl;
+ for (size_t i = 0; i < active_.Size(); i ++) {
+ DumpInterval(stream, active_.Get(i));
+ }
+ stream << "unhandled: " << std::endl;
+ auto unhandled = (unhandled_ != nullptr) ?
+ unhandled_ : &unhandled_core_intervals_;
+ for (size_t i = 0; i < unhandled->Size(); i ++) {
+ DumpInterval(stream, unhandled->Get(i));
+ }
+ stream << "handled: " << std::endl;
+ for (size_t i = 0; i < handled_.Size(); i ++) {
+ DumpInterval(stream, handled_.Get(i));
+ }
+}
+
// By the book implementation of a linear scan register allocator.
void RegisterAllocator::LinearScan() {
while (!unhandled_->IsEmpty()) {
@@ -441,6 +470,10 @@ void RegisterAllocator::LinearScan() {
DCHECK(unhandled_->IsEmpty() || unhandled_->Peek()->GetStart() >= current->GetStart());
size_t position = current->GetStart();
+ // Remember the inactive_ size here since the ones moved to inactive_ from
+ // active_ below shouldn't need to be re-checked.
+ size_t inactive_intervals_to_handle = inactive_.Size();
+
// (2) Remove currently active intervals that are dead at this position.
// Move active intervals that have a lifetime hole at this position
// to inactive.
@@ -459,15 +492,18 @@ void RegisterAllocator::LinearScan() {
// (3) Remove currently inactive intervals that are dead at this position.
// Move inactive intervals that cover this position to active.
- for (size_t i = 0; i < inactive_.Size(); ++i) {
+ for (size_t i = 0; i < inactive_intervals_to_handle; ++i) {
LiveInterval* interval = inactive_.Get(i);
+ DCHECK(interval->GetStart() < position || interval->IsFixed());
if (interval->IsDeadAt(position)) {
inactive_.Delete(interval);
--i;
+ --inactive_intervals_to_handle;
handled_.Add(interval);
} else if (interval->Covers(position)) {
inactive_.Delete(interval);
--i;
+ --inactive_intervals_to_handle;
active_.Add(interval);
}
}
@@ -506,13 +542,33 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveInterval* current) {
free_until[i] = kMaxLifetimePosition;
}
+ // For each active interval, set its register to not free.
+ for (size_t i = 0, e = active_.Size(); i < e; ++i) {
+ LiveInterval* interval = active_.Get(i);
+ DCHECK(interval->HasRegister());
+ free_until[interval->GetRegister()] = 0;
+ }
+
// For each inactive interval, set its register to be free until
// the next intersection with `current`.
- // Thanks to SSA, this should only be needed for intervals
- // that are the result of a split.
for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
LiveInterval* inactive = inactive_.Get(i);
+ // Temp/Slow-path-safepoint interval has no holes.
+ DCHECK(!inactive->IsTemp() && !inactive->IsSlowPathSafepoint());
+ if (!current->IsSplit() && !inactive->IsFixed()) {
+ // Neither current nor inactive are fixed.
+ // Thanks to SSA, a non-split interval starting in a hole of an
+ // inactive interval should never intersect with that inactive interval.
+ // Only if it's not fixed though, because fixed intervals don't come from SSA.
+ DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
+ continue;
+ }
+
DCHECK(inactive->HasRegister());
+ if (free_until[inactive->GetRegister()] == 0) {
+ // Already used by some active interval. No need to intersect.
+ continue;
+ }
size_t next_intersection = inactive->FirstIntersectionWith(current);
if (next_intersection != kNoLifetime) {
free_until[inactive->GetRegister()] =
@@ -520,13 +576,6 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveInterval* current) {
}
}
- // For each active interval, set its register to not free.
- for (size_t i = 0, e = active_.Size(); i < e; ++i) {
- LiveInterval* interval = active_.Get(i);
- DCHECK(interval->HasRegister());
- free_until[interval->GetRegister()] = 0;
- }
-
int reg = -1;
if (current->HasRegister()) {
// Some instructions have a fixed register output.
@@ -605,10 +654,18 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
// For each inactive interval, find the next use of its register after the
// start of current.
- // Thanks to SSA, this should only be needed for intervals
- // that are the result of a split.
for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
LiveInterval* inactive = inactive_.Get(i);
+ // Temp/Slow-path-safepoint interval has no holes.
+ DCHECK(!inactive->IsTemp() && !inactive->IsSlowPathSafepoint());
+ if (!current->IsSplit() && !inactive->IsFixed()) {
+ // Neither current nor inactive are fixed.
+ // Thanks to SSA, a non-split interval starting in a hole of an
+ // inactive interval should never intersect with that inactive interval.
+ // Only if it's not fixed though, because fixed intervals don't come from SSA.
+ DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
+ continue;
+ }
DCHECK(inactive->HasRegister());
size_t next_intersection = inactive->FirstIntersectionWith(current);
if (next_intersection != kNoLifetime) {
@@ -660,20 +717,29 @@ bool RegisterAllocator::AllocateBlockedReg(LiveInterval* current) {
}
}
- for (size_t i = 0; i < inactive_.Size(); ++i) {
+ for (size_t i = 0, e = inactive_.Size(); i < e; ++i) {
LiveInterval* inactive = inactive_.Get(i);
if (inactive->GetRegister() == reg) {
+ if (!current->IsSplit() && !inactive->IsFixed()) {
+ // Neither current nor inactive are fixed.
+ // Thanks to SSA, a non-split interval starting in a hole of an
+ // inactive interval should never intersect with that inactive interval.
+ // Only if it's not fixed though, because fixed intervals don't come from SSA.
+ DCHECK_EQ(inactive->FirstIntersectionWith(current), kNoLifetime);
+ continue;
+ }
size_t next_intersection = inactive->FirstIntersectionWith(current);
if (next_intersection != kNoLifetime) {
if (inactive->IsFixed()) {
LiveInterval* split = Split(current, next_intersection);
AddSorted(unhandled_, split);
} else {
- LiveInterval* split = Split(inactive, current->GetStart());
+ LiveInterval* split = Split(inactive, next_intersection);
inactive_.DeleteAt(i);
+ --i;
+ --e;
handled_.Add(inactive);
AddSorted(unhandled_, split);
- --i;
}
}
}
@@ -812,7 +878,7 @@ void RegisterAllocator::InsertParallelMoveAt(size_t position,
HInstruction* at = liveness_.GetInstructionFromPosition(position / 2);
if (at == nullptr) {
- // Block boundary, don't no anything the connection of split siblings will handle it.
+ // Block boundary, don't do anything; the connection of split siblings will handle it.
return;
}
HParallelMove* move;
@@ -973,7 +1039,14 @@ void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
HInstruction* safepoint = safepoints_.Get(i);
size_t position = safepoint->GetLifetimePosition();
LocationSummary* locations = safepoint->GetLocations();
- if (!current->Covers(position)) continue;
+ if (!current->Covers(position)) {
+ continue;
+ }
+ if (interval->GetStart() == position) {
+ // The safepoint is for this instruction, so the location of the instruction
+ // does not need to be saved.
+ continue;
+ }
if ((current->GetType() == Primitive::kPrimNot) && current->GetParent()->HasSpillSlot()) {
locations->SetStackBit(current->GetParent()->GetSpillSlot() / kVRegSize);
@@ -1016,7 +1089,7 @@ void RegisterAllocator::ConnectSplitSiblings(LiveInterval* interval,
}
size_t from_position = from->GetLifetimeEnd() - 1;
- // When an instructions dies at entry of another, and the latter is the beginning
+ // When an instruction dies at entry of another, and the latter is the beginning
// of a block, the register allocator ensures the former has a register
// at block->GetLifetimeStart() + 1. Since this is at a block boundary, it must
// be handled in this method.
@@ -1129,8 +1202,8 @@ void RegisterAllocator::Resolve() {
// Resolve phi inputs. Order does not matter.
for (HLinearOrderIterator it(liveness_); !it.Done(); it.Advance()) {
HBasicBlock* current = it.Current();
- for (HInstructionIterator it(current->GetPhis()); !it.Done(); it.Advance()) {
- HInstruction* phi = it.Current();
+ for (HInstructionIterator inst_it(current->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* phi = inst_it.Current();
for (size_t i = 0, e = current->GetPredecessors().Size(); i < e; ++i) {
HBasicBlock* predecessor = current->GetPredecessors().Get(i);
DCHECK_EQ(predecessor->GetSuccessors().Size(), 1u);
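
TryAllocateFreeReg now marks the registers of active intervals as unavailable before walking the inactive intervals, which lets the inactive loop skip registers that are already blocked as well as, thanks to SSA, inactive intervals that provably cannot intersect a non-split current interval. A standalone sketch of that computation, using a simplified Interval record (not LiveInterval) with the next intersection precomputed:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Simplified stand-ins; `next_intersection_with_current` is assumed to be
    // precomputed (FirstIntersectionWith in the real code).
    constexpr size_t kNoLifetime = static_cast<size_t>(-1);
    struct Interval {
      int reg;
      bool is_fixed;
      size_t next_intersection_with_current;
    };

    // `free_until` is pre-filled by the caller with the maximum lifetime position
    // for every register.
    void ComputeFreeUntil(const std::vector<Interval>& active,
                          const std::vector<Interval>& inactive,
                          bool current_is_split,
                          std::vector<size_t>* free_until) {
      // (1) Registers held by active intervals are not free at all.
      for (const Interval& interval : active) {
        (*free_until)[interval.reg] = 0;
      }
      // (2) Inactive intervals only constrain their register up to the next
      //     intersection with the current interval.
      for (const Interval& interval : inactive) {
        if (!current_is_split && !interval.is_fixed) {
          continue;  // SSA guarantee: such an interval never intersects current.
        }
        if ((*free_until)[interval.reg] == 0) {
          continue;  // Already blocked by an active interval; no need to intersect.
        }
        if (interval.next_intersection_with_current != kNoLifetime) {
          (*free_until)[interval.reg] = std::min((*free_until)[interval.reg],
                                                 interval.next_intersection_with_current);
        }
      }
    }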
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index b88153969b..976ee39ca8 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -126,6 +126,7 @@ class RegisterAllocator {
void ProcessInstruction(HInstruction* instruction);
bool ValidateInternal(bool log_fatal_on_failure) const;
void DumpInterval(std::ostream& stream, LiveInterval* interval) const;
+ void DumpAllIntervals(std::ostream& stream) const;
ArenaAllocator* const allocator_;
CodeGenerator* const codegen_;
diff --git a/compiler/optimizing/register_allocator_test.cc b/compiler/optimizing/register_allocator_test.cc
index 2d84a9d335..6845deacb9 100644
--- a/compiler/optimizing/register_allocator_test.cc
+++ b/compiler/optimizing/register_allocator_test.cc
@@ -414,21 +414,24 @@ TEST(RegisterAllocatorTest, FreeUntil) {
// Add an artificial range to cover the temps that will be put in the unhandled list.
LiveInterval* unhandled = graph->GetEntryBlock()->GetFirstInstruction()->GetLiveInterval();
unhandled->AddLoopRange(0, 60);
+ // For SSA value intervals, only an interval resulting from a split may intersect
+ // with inactive intervals.
+ unhandled = register_allocator.Split(unhandled, 5);
// Add three temps holding the same register, and starting at different positions.
// Put the one that should be picked in the middle of the inactive list to ensure
// we do not depend on an order.
- LiveInterval* interval = LiveInterval::MakeTempInterval(&allocator, Primitive::kPrimInt);
+ LiveInterval* interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
interval->SetRegister(0);
interval->AddRange(40, 50);
register_allocator.inactive_.Add(interval);
- interval = LiveInterval::MakeTempInterval(&allocator, Primitive::kPrimInt);
+ interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
interval->SetRegister(0);
interval->AddRange(20, 30);
register_allocator.inactive_.Add(interval);
- interval = LiveInterval::MakeTempInterval(&allocator, Primitive::kPrimInt);
+ interval = LiveInterval::MakeInterval(&allocator, Primitive::kPrimInt);
interval->SetRegister(0);
interval->AddRange(60, 70);
register_allocator.inactive_.Add(interval);
@@ -438,7 +441,7 @@ TEST(RegisterAllocatorTest, FreeUntil) {
register_allocator.processing_core_registers_ = true;
register_allocator.unhandled_ = &register_allocator.unhandled_core_intervals_;
- register_allocator.TryAllocateFreeReg(unhandled);
+ ASSERT_TRUE(register_allocator.TryAllocateFreeReg(unhandled));
// Check that we have split the interval.
ASSERT_EQ(1u, register_allocator.unhandled_->Size());
diff --git a/compiler/optimizing/ssa_builder.cc b/compiler/optimizing/ssa_builder.cc
index a0cc8a94ee..e83c528fab 100644
--- a/compiler/optimizing/ssa_builder.cc
+++ b/compiler/optimizing/ssa_builder.cc
@@ -109,8 +109,8 @@ void SsaBuilder::VisitBasicBlock(HBasicBlock* block) {
HPhi* phi = new (GetGraph()->GetArena()) HPhi(
GetGraph()->GetArena(), local, block->GetPredecessors().Size(), Primitive::kPrimVoid);
for (size_t i = 0; i < block->GetPredecessors().Size(); i++) {
- HInstruction* value = ValueOfLocal(block->GetPredecessors().Get(i), local);
- phi->SetRawInputAt(i, value);
+ HInstruction* pred_value = ValueOfLocal(block->GetPredecessors().Get(i), local);
+ phi->SetRawInputAt(i, pred_value);
}
block->AddPhi(phi);
value = phi;
diff --git a/compiler/optimizing/ssa_liveness_analysis.cc b/compiler/optimizing/ssa_liveness_analysis.cc
index 1e34670d76..0085b27c58 100644
--- a/compiler/optimizing/ssa_liveness_analysis.cc
+++ b/compiler/optimizing/ssa_liveness_analysis.cc
@@ -107,15 +107,15 @@ void SsaLivenessAnalysis::NumberInstructions() {
HBasicBlock* block = it.Current();
block->SetLifetimeStart(lifetime_position);
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* current = inst_it.Current();
current->Accept(location_builder);
LocationSummary* locations = current->GetLocations();
if (locations != nullptr && locations->Out().IsValid()) {
instructions_from_ssa_index_.Add(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- new (graph_.GetArena()) LiveInterval(graph_.GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(graph_.GetArena(), current->GetType(), current));
}
current->SetLifetimePosition(lifetime_position);
}
@@ -124,15 +124,16 @@ void SsaLivenessAnalysis::NumberInstructions() {
// Add a null marker to notify we are starting a block.
instructions_from_lifetime_position_.Add(nullptr);
- for (HInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
+ for (HInstructionIterator inst_it(block->GetInstructions()); !inst_it.Done();
+ inst_it.Advance()) {
+ HInstruction* current = inst_it.Current();
current->Accept(codegen_->GetLocationBuilder());
LocationSummary* locations = current->GetLocations();
if (locations != nullptr && locations->Out().IsValid()) {
instructions_from_ssa_index_.Add(current);
current->SetSsaIndex(ssa_index++);
current->SetLiveInterval(
- new (graph_.GetArena()) LiveInterval(graph_.GetArena(), current->GetType(), current));
+ LiveInterval::MakeInterval(graph_.GetArena(), current->GetType(), current));
}
instructions_from_lifetime_position_.Add(current);
current->SetLifetimePosition(lifetime_position);
@@ -178,8 +179,8 @@ void SsaLivenessAnalysis::ComputeLiveRanges() {
HBasicBlock* successor = block->GetSuccessors().Get(i);
live_in->Union(GetLiveInSet(*successor));
size_t phi_input_index = successor->GetPredecessorIndexOf(block);
- for (HInstructionIterator it(successor->GetPhis()); !it.Done(); it.Advance()) {
- HInstruction* phi = it.Current();
+ for (HInstructionIterator inst_it(successor->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* phi = inst_it.Current();
HInstruction* input = phi->InputAt(phi_input_index);
input->GetLiveInterval()->AddPhiUse(phi, phi_input_index, block);
// A phi input whose last user is the phi dies at the end of the predecessor block,
@@ -195,8 +196,9 @@ void SsaLivenessAnalysis::ComputeLiveRanges() {
current->GetLiveInterval()->AddRange(block->GetLifetimeStart(), block->GetLifetimeEnd());
}
- for (HBackwardInstructionIterator it(block->GetInstructions()); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
+ for (HBackwardInstructionIterator back_it(block->GetInstructions()); !back_it.Done();
+ back_it.Advance()) {
+ HInstruction* current = back_it.Current();
if (current->HasSsaIndex()) {
// Kill the instruction and shorten its interval.
kill->SetBit(current->GetSsaIndex());
@@ -230,8 +232,8 @@ void SsaLivenessAnalysis::ComputeLiveRanges() {
}
// Kill phis defined in this block.
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- HInstruction* current = it.Current();
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HInstruction* current = inst_it.Current();
if (current->HasSsaIndex()) {
kill->SetBit(current->GetSsaIndex());
live_in->ClearBit(current->GetSsaIndex());
diff --git a/compiler/optimizing/ssa_liveness_analysis.h b/compiler/optimizing/ssa_liveness_analysis.h
index 7dda4f61d5..ca08d5b3e6 100644
--- a/compiler/optimizing/ssa_liveness_analysis.h
+++ b/compiler/optimizing/ssa_liveness_analysis.h
@@ -25,7 +25,7 @@ class CodeGenerator;
static constexpr int kNoRegister = -1;
-class BlockInfo : public ArenaObject {
+class BlockInfo : public ArenaObject<kArenaAllocMisc> {
public:
BlockInfo(ArenaAllocator* allocator, const HBasicBlock& block, size_t number_of_ssa_values)
: block_(block),
@@ -53,7 +53,7 @@ class BlockInfo : public ArenaObject {
* A live range contains the start and end of a range where an instruction or a temporary
* is live.
*/
-class LiveRange : public ArenaObject {
+class LiveRange FINAL : public ArenaObject<kArenaAllocMisc> {
public:
LiveRange(size_t start, size_t end, LiveRange* next) : start_(start), end_(end), next_(next) {
DCHECK_LT(start, end);
@@ -64,16 +64,16 @@ class LiveRange : public ArenaObject {
size_t GetEnd() const { return end_; }
LiveRange* GetNext() const { return next_; }
- bool IntersectsWith(const LiveRange& other) {
+ bool IntersectsWith(const LiveRange& other) const {
return (start_ >= other.start_ && start_ < other.end_)
|| (other.start_ >= start_ && other.start_ < end_);
}
- bool IsBefore(const LiveRange& other) {
+ bool IsBefore(const LiveRange& other) const {
return end_ <= other.start_;
}
- void Dump(std::ostream& stream) {
+ void Dump(std::ostream& stream) const {
stream << "[" << start_ << ", " << end_ << ")";
}
@@ -90,7 +90,7 @@ class LiveRange : public ArenaObject {
/**
* A use position represents a live interval use at a given position.
*/
-class UsePosition : public ArenaObject {
+class UsePosition : public ArenaObject<kArenaAllocMisc> {
public:
UsePosition(HInstruction* user,
size_t input_index,
@@ -137,28 +137,13 @@ class UsePosition : public ArenaObject {
* An interval is a list of disjoint live ranges where an instruction is live.
* Each instruction that has uses gets an interval.
*/
-class LiveInterval : public ArenaObject {
+class LiveInterval : public ArenaObject<kArenaAllocMisc> {
public:
- LiveInterval(ArenaAllocator* allocator,
- Primitive::Type type,
- HInstruction* defined_by = nullptr,
- bool is_fixed = false,
- int reg = kNoRegister,
- bool is_temp = false,
- bool is_slow_path_safepoint = false)
- : allocator_(allocator),
- first_range_(nullptr),
- last_range_(nullptr),
- first_use_(nullptr),
- type_(type),
- next_sibling_(nullptr),
- parent_(this),
- register_(reg),
- spill_slot_(kNoSpillSlot),
- is_fixed_(is_fixed),
- is_temp_(is_temp),
- is_slow_path_safepoint_(is_slow_path_safepoint),
- defined_by_(defined_by) {}
+ static LiveInterval* MakeInterval(ArenaAllocator* allocator,
+ Primitive::Type type,
+ HInstruction* instruction = nullptr) {
+ return new (allocator) LiveInterval(allocator, type, instruction);
+ }
static LiveInterval* MakeSlowPathInterval(ArenaAllocator* allocator, HInstruction* instruction) {
return new (allocator) LiveInterval(
@@ -174,7 +159,10 @@ class LiveInterval : public ArenaObject {
}
bool IsFixed() const { return is_fixed_; }
+ bool IsTemp() const { return is_temp_; }
bool IsSlowPathSafepoint() const { return is_slow_path_safepoint_; }
+ // This interval is the result of a split.
+ bool IsSplit() const { return parent_ != this; }
void AddUse(HInstruction* instruction, size_t input_index, bool is_environment) {
// Set the use within the instruction.
@@ -489,6 +477,7 @@ class LiveInterval : public ArenaObject {
} while ((use = use->GetNext()) != nullptr);
}
stream << "}";
+ stream << " is_fixed: " << is_fixed_ << ", is_split: " << IsSplit();
}
LiveInterval* GetNextSibling() const { return next_sibling_; }
@@ -520,12 +509,31 @@ class LiveInterval : public ArenaObject {
// Finds the interval that covers `position`.
const LiveInterval& GetIntervalAt(size_t position) const;
- bool IsTemp() const { return is_temp_; }
-
// Returns whether `other` and `this` share the same kind of register.
bool SameRegisterKind(Location other) const;
private:
+ LiveInterval(ArenaAllocator* allocator,
+ Primitive::Type type,
+ HInstruction* defined_by = nullptr,
+ bool is_fixed = false,
+ int reg = kNoRegister,
+ bool is_temp = false,
+ bool is_slow_path_safepoint = false)
+ : allocator_(allocator),
+ first_range_(nullptr),
+ last_range_(nullptr),
+ first_use_(nullptr),
+ type_(type),
+ next_sibling_(nullptr),
+ parent_(this),
+ register_(reg),
+ spill_slot_(kNoSpillSlot),
+ is_fixed_(is_fixed),
+ is_temp_(is_temp),
+ is_slow_path_safepoint_(is_slow_path_safepoint),
+ defined_by_(defined_by) {}
+
ArenaAllocator* const allocator_;
// Ranges of this interval. We need a quick access to the last range to test
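
LiveInterval's constructor moves into the private section, and all call sites (see ssa_liveness_analysis.cc and optimizing_unit_test.h above) now go through named factories such as MakeInterval and MakeTempInterval. A minimal, self-contained sketch of that named-constructor pattern; IntervalSketch is a stand-in, and the arena placement-new is elided.

    class IntervalSketch {
     public:
      static IntervalSketch* MakeInterval(int type) {
        return new IntervalSketch(type, /*is_temp=*/false);
      }
      static IntervalSketch* MakeTempInterval(int type) {
        return new IntervalSketch(type, /*is_temp=*/true);
      }
      int GetType() const { return type_; }
      bool IsTemp() const { return is_temp_; }

     private:
      // Private so every interval is created through a factory that documents
      // what kind of interval it is.
      IntervalSketch(int type, bool is_temp) : type_(type), is_temp_(is_temp) {}
      const int type_;
      const bool is_temp_;
    };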
diff --git a/compiler/optimizing/ssa_phi_elimination.cc b/compiler/optimizing/ssa_phi_elimination.cc
index 4eda0f3757..56979e1c6a 100644
--- a/compiler/optimizing/ssa_phi_elimination.cc
+++ b/compiler/optimizing/ssa_phi_elimination.cc
@@ -22,10 +22,10 @@ void SsaDeadPhiElimination::Run() {
// Add to the worklist phis referenced by non-phi instructions.
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- HPhi* phi = it.Current()->AsPhi();
- for (HUseIterator<HInstruction> it(phi->GetUses()); !it.Done(); it.Advance()) {
- HUseListNode<HInstruction>* current = it.Current();
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ HPhi* phi = inst_it.Current()->AsPhi();
+ for (HUseIterator<HInstruction> use_it(phi->GetUses()); !use_it.Done(); use_it.Advance()) {
+ HUseListNode<HInstruction>* current = use_it.Current();
HInstruction* user = current->GetUser();
if (!user->IsPhi()) {
worklist_.Add(phi);
@@ -61,8 +61,9 @@ void SsaDeadPhiElimination::Run() {
next = current->GetNext();
if (current->AsPhi()->IsDead()) {
if (current->HasUses()) {
- for (HUseIterator<HInstruction> it(current->GetUses()); !it.Done(); it.Advance()) {
- HUseListNode<HInstruction>* user_node = it.Current();
+ for (HUseIterator<HInstruction> use_it(current->GetUses()); !use_it.Done();
+ use_it.Advance()) {
+ HUseListNode<HInstruction>* user_node = use_it.Current();
HInstruction* user = user_node->GetUser();
DCHECK(user->IsLoopHeaderPhi());
DCHECK(user->AsPhi()->IsDead());
@@ -72,8 +73,9 @@ void SsaDeadPhiElimination::Run() {
}
}
if (current->HasEnvironmentUses()) {
- for (HUseIterator<HEnvironment> it(current->GetEnvUses()); !it.Done(); it.Advance()) {
- HUseListNode<HEnvironment>* user_node = it.Current();
+ for (HUseIterator<HEnvironment> use_it(current->GetEnvUses()); !use_it.Done();
+ use_it.Advance()) {
+ HUseListNode<HEnvironment>* user_node = use_it.Current();
HEnvironment* user = user_node->GetUser();
user->SetRawEnvAt(user_node->GetIndex(), nullptr);
current->RemoveEnvironmentUser(user, user_node->GetIndex());
@@ -90,8 +92,8 @@ void SsaRedundantPhiElimination::Run() {
// Add all phis in the worklist.
for (HReversePostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
HBasicBlock* block = it.Current();
- for (HInstructionIterator it(block->GetPhis()); !it.Done(); it.Advance()) {
- worklist_.Add(it.Current()->AsPhi());
+ for (HInstructionIterator inst_it(block->GetPhis()); !inst_it.Done(); inst_it.Advance()) {
+ worklist_.Add(inst_it.Current()->AsPhi());
}
}
diff --git a/compiler/optimizing/stack_map_stream.h b/compiler/optimizing/stack_map_stream.h
index 5f74c33643..9cfa71c13f 100644
--- a/compiler/optimizing/stack_map_stream.h
+++ b/compiler/optimizing/stack_map_stream.h
@@ -167,33 +167,33 @@ class StackMapStream : public ValueObject {
}
// Set the register map.
- MemoryRegion region = dex_register_maps_region.Subregion(
+ MemoryRegion register_region = dex_register_maps_region.Subregion(
next_dex_register_map_offset,
DexRegisterMap::kFixedSize + entry.num_dex_registers * DexRegisterMap::SingleEntrySize());
- next_dex_register_map_offset += region.size();
- DexRegisterMap dex_register_map(region);
- stack_map.SetDexRegisterMapOffset(region.start() - memory_start);
+ next_dex_register_map_offset += register_region.size();
+ DexRegisterMap dex_register_map(register_region);
+ stack_map.SetDexRegisterMapOffset(register_region.start() - memory_start);
- for (size_t i = 0; i < entry.num_dex_registers; ++i) {
+ for (size_t j = 0; j < entry.num_dex_registers; ++j) {
DexRegisterEntry register_entry =
- dex_register_maps_.Get(i + entry.dex_register_maps_start_index);
- dex_register_map.SetRegisterInfo(i, register_entry.kind, register_entry.value);
+ dex_register_maps_.Get(j + entry.dex_register_maps_start_index);
+ dex_register_map.SetRegisterInfo(j, register_entry.kind, register_entry.value);
}
// Set the inlining info.
if (entry.inlining_depth != 0) {
- MemoryRegion region = inline_infos_region.Subregion(
+ MemoryRegion inline_region = inline_infos_region.Subregion(
next_inline_info_offset,
InlineInfo::kFixedSize + entry.inlining_depth * InlineInfo::SingleEntrySize());
- next_inline_info_offset += region.size();
- InlineInfo inline_info(region);
+ next_inline_info_offset += inline_region.size();
+ InlineInfo inline_info(inline_region);
- stack_map.SetInlineDescriptorOffset(region.start() - memory_start);
+ stack_map.SetInlineDescriptorOffset(inline_region.start() - memory_start);
inline_info.SetDepth(entry.inlining_depth);
- for (size_t i = 0; i < entry.inlining_depth; ++i) {
- InlineInfoEntry inline_entry = inline_infos_.Get(i + entry.inline_infos_start_index);
- inline_info.SetMethodReferenceIndexAtDepth(i, inline_entry.method_index);
+ for (size_t j = 0; j < entry.inlining_depth; ++j) {
+ InlineInfoEntry inline_entry = inline_infos_.Get(j + entry.inline_infos_start_index);
+ inline_info.SetMethodReferenceIndexAtDepth(j, inline_entry.method_index);
}
} else {
stack_map.SetInlineDescriptorOffset(InlineInfo::kNoInlineInfo);