Add an HCurrentMethod node.

This enables register allocation for the current method, so that
its users don't always reload it from the stack.
Currently it is only used by HLoadClass; follow-up CLs will convert
the other users.
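
A minimal sketch (using only names from this CL) of how a user now
takes the method as a regular input instead of reloading it from the
stack:

  // The graph lazily materializes a single HCurrentMethod in the
  // entry block...
  HCurrentMethod* method = graph_->GetCurrentMethod();
  // ...and HLoadClass consumes it as input 0, so the register
  // allocator can keep the method in a register across uses.
  HLoadClass* load_class = new (arena_) HLoadClass(
      method, storage_index, *dex_compilation_unit_->GetDexFile(),
      is_referrer_class, dex_pc);
  current_block_->AddInstruction(load_class);
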
Change-Id: If73324d85643102faba47fabbbd2755eb258c59c
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index 49a0444..c142e4d 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -712,7 +712,11 @@
} else {
clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
HLoadClass* load_class = new (arena_) HLoadClass(
- storage_index, *dex_compilation_unit_->GetDexFile(), is_referrer_class, dex_pc);
+ graph_->GetCurrentMethod(),
+ storage_index,
+ *dex_compilation_unit_->GetDexFile(),
+ is_referrer_class,
+ dex_pc);
current_block_->AddInstruction(load_class);
clinit_check = new (arena_) HClinitCheck(load_class, dex_pc);
current_block_->AddInstruction(clinit_check);
@@ -915,8 +919,11 @@
*outer_compilation_unit_->GetDexFile(), storage_index);
bool is_initialized = resolved_field->GetDeclaringClass()->IsInitialized() && is_in_dex_cache;
- HLoadClass* constant = new (arena_) HLoadClass(
- storage_index, *dex_compilation_unit_->GetDexFile(), is_referrer_class, dex_pc);
+ HLoadClass* constant = new (arena_) HLoadClass(graph_->GetCurrentMethod(),
+ storage_index,
+ *dex_compilation_unit_->GetDexFile(),
+ is_referrer_class,
+ dex_pc);
current_block_->AddInstruction(constant);
HInstruction* cls = constant;
@@ -1152,6 +1159,7 @@
}
HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
HLoadClass* cls = new (arena_) HLoadClass(
+ graph_->GetCurrentMethod(),
type_index,
*dex_compilation_unit_->GetDexFile(),
IsOutermostCompilingClass(type_index),
@@ -2167,6 +2175,7 @@
return false;
}
current_block_->AddInstruction(new (arena_) HLoadClass(
+ graph_->GetCurrentMethod(),
type_index,
*dex_compilation_unit_->GetDexFile(),
IsOutermostCompilingClass(type_index),
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 7d26a3c..1cca522 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -41,6 +41,7 @@
}
static constexpr int kCurrentMethodStackOffset = 0;
+static constexpr Register kMethodRegisterArgument = R0;
// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
@@ -544,7 +545,7 @@
uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
__ PushList(push_mask);
__ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
- __ cfi().RelOffsetForMany(DWARFReg(R0), 0, push_mask, kArmWordSize);
+ __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, push_mask, kArmWordSize);
if (fpu_spill_mask_ != 0) {
SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
__ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
@@ -554,7 +555,7 @@
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ AddConstant(SP, -adjust);
__ cfi().AdjustCFAOffset(adjust);
- __ StoreToOffset(kStoreWord, R0, SP, 0);
+ __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}
void CodeGeneratorARM::GenerateFrameExit() {
@@ -803,11 +804,11 @@
void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
LocationSummary* locations = instruction->GetLocations();
- if (locations != nullptr && locations->Out().Equals(location)) {
+ if (instruction->IsCurrentMethod()) {
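+ // The current method is unconditionally spilled at stack offset 0
+ // on entry, so copy it from there.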
+ Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
+ } else if (locations != nullptr && locations->Out().Equals(location)) {
return;
- }
-
- if (locations != nullptr && locations->Out().IsConstant()) {
+ } else if (locations != nullptr && locations->Out().IsConstant()) {
HConstant* const_to_move = locations->Out().GetConstant();
if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
int32_t value = GetInt32ValueOf(const_to_move);
@@ -1286,7 +1287,7 @@
void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
- locations->AddTemp(Location::RegisterLocation(R0));
+ locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
InvokeDexCallingConventionVisitorARM calling_convention_visitor;
for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
@@ -2802,9 +2803,19 @@
locations->SetOut(location);
}
-void InstructionCodeGeneratorARM::VisitParameterValue(HParameterValue* instruction) {
+void InstructionCodeGeneratorARM::VisitParameterValue(
+ HParameterValue* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, the parameter is already at its location.
- UNUSED(instruction);
+}
+
+void LocationsBuilderARM::VisitCurrentMethod(HCurrentMethod* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
+}
+
+void InstructionCodeGeneratorARM::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
+ // Nothing to do, the method is already at its location.
}
void LocationsBuilderARM::VisitNot(HNot* not_) {
@@ -3954,21 +3965,25 @@
: LocationSummary::kNoCall;
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
- Register out = cls->GetLocations()->Out().AsRegister<Register>();
+ LocationSummary* locations = cls->GetLocations();
+ Register out = locations->Out().AsRegister<Register>();
+ Register current_method = locations->InAt(0).AsRegister<Register>();
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- codegen_->LoadCurrentMethod(out);
- __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
+ __ LoadFromOffset(
+ kLoadWord, out, current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
} else {
DCHECK(cls->CanCallRuntime());
- codegen_->LoadCurrentMethod(out);
- __ LoadFromOffset(
- kLoadWord, out, out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
+ __ LoadFromOffset(kLoadWord,
+ out,
+ current_method,
+ mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
__ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index ced60cd..04c38f6 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -634,16 +634,16 @@
Location location,
HInstruction* move_for) {
LocationSummary* locations = instruction->GetLocations();
- if (locations != nullptr && locations->Out().Equals(location)) {
- return;
- }
-
Primitive::Type type = instruction->GetType();
DCHECK_NE(type, Primitive::kPrimVoid);
- if (instruction->IsIntConstant()
- || instruction->IsLongConstant()
- || instruction->IsNullConstant()) {
+ if (instruction->IsCurrentMethod()) {
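+ // The current method is unconditionally spilled at stack offset 0
+ // on entry, so copy it from there.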
+ MoveLocation(location, Location::StackSlot(kCurrentMethodStackOffset));
+ } else if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ } else if (instruction->IsIntConstant()
+ || instruction->IsLongConstant()
+ || instruction->IsNullConstant()) {
int64_t value = GetInt64ValueOf(instruction->AsConstant());
if (location.IsRegister()) {
Register dst = RegisterFrom(location, type);
@@ -2345,20 +2345,20 @@
LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
Register out = OutputRegister(cls);
+ Register current_method = InputRegisterAt(cls, 0);
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- codegen_->LoadCurrentMethod(out);
- __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
+ __ Ldr(out, HeapOperand(current_method, mirror::ArtMethod::DeclaringClassOffset()));
} else {
DCHECK(cls->CanCallRuntime());
- codegen_->LoadCurrentMethod(out);
- __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
+ __ Ldr(out, HeapOperand(current_method, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
__ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
@@ -2674,9 +2674,20 @@
locations->SetOut(location);
}
-void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
+void InstructionCodeGeneratorARM64::VisitParameterValue(
+ HParameterValue* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, the parameter is already at its location.
- UNUSED(instruction);
+}
+
+void LocationsBuilderARM64::VisitCurrentMethod(HCurrentMethod* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(LocationFrom(x0));
+}
+
+void InstructionCodeGeneratorARM64::VisitCurrentMethod(
+ HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
+ // Nothing to do, the method is already at its location.
}
void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a6f01da..3c5efa4 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -36,6 +36,7 @@
namespace x86 {
static constexpr int kCurrentMethodStackOffset = 0;
+static constexpr Register kMethodRegisterArgument = EAX;
static constexpr Register kCoreCalleeSaves[] = { EBP, ESI, EDI };
@@ -498,7 +499,7 @@
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ subl(ESP, Immediate(adjust));
__ cfi().AdjustCFAOffset(adjust);
- __ movl(Address(ESP, kCurrentMethodStackOffset), EAX);
+ __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument);
}
void CodeGeneratorX86::GenerateFrameExit() {
@@ -717,11 +718,11 @@
void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
LocationSummary* locations = instruction->GetLocations();
- if (locations != nullptr && locations->Out().Equals(location)) {
+ if (instruction->IsCurrentMethod()) {
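+ // The current method is unconditionally spilled at stack offset 0
+ // on entry, so copy it from there.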
+ Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
+ } else if (locations != nullptr && locations->Out().Equals(location)) {
return;
- }
-
- if (locations != nullptr && locations->Out().IsConstant()) {
+ } else if (locations != nullptr && locations->Out().IsConstant()) {
HConstant* const_to_move = locations->Out().GetConstant();
if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
Immediate imm(GetInt32ValueOf(const_to_move));
@@ -1239,7 +1240,7 @@
void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
- locations->AddTemp(Location::RegisterLocation(EAX));
+ locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
InvokeDexCallingConventionVisitorX86 calling_convention_visitor;
for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
@@ -3010,8 +3011,17 @@
locations->SetOut(location);
}
-void InstructionCodeGeneratorX86::VisitParameterValue(HParameterValue* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorX86::VisitParameterValue(
+ HParameterValue* instruction ATTRIBUTE_UNUSED) {
+}
+
+void LocationsBuilderX86::VisitCurrentMethod(HCurrentMethod* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
+}
+
+void InstructionCodeGeneratorX86::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
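+ // Nothing to do, the method is already at its location.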
}
void LocationsBuilderX86::VisitNot(HNot* not_) {
@@ -4279,20 +4289,22 @@
: LocationSummary::kNoCall;
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
- Register out = cls->GetLocations()->Out().AsRegister<Register>();
+ LocationSummary* locations = cls->GetLocations();
+ Register out = locations->Out().AsRegister<Register>();
+ Register current_method = locations->InAt(0).AsRegister<Register>();
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- codegen_->LoadCurrentMethod(out);
- __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ movl(out, Address(current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
DCHECK(cls->CanCallRuntime());
- codegen_->LoadCurrentMethod(out);
- __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
+ __ movl(out, Address(
+ current_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index f49c26d..7b08c89 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -39,13 +39,13 @@
static constexpr Register TMP = R11;
static constexpr int kCurrentMethodStackOffset = 0;
+static constexpr Register kMethodRegisterArgument = RDI;
static constexpr Register kCoreCalleeSaves[] = { RBX, RBP, R12, R13, R14, R15 };
static constexpr FloatRegister kFpuCalleeSaves[] = { XMM12, XMM13, XMM14, XMM15 };
static constexpr int kC2ConditionMask = 0x400;
-
#define __ reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler())->
class NullCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
@@ -545,7 +545,8 @@
}
}
- __ movl(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI));
+ __ movl(Address(CpuRegister(RSP), kCurrentMethodStackOffset),
+ CpuRegister(kMethodRegisterArgument));
}
void CodeGeneratorX86_64::GenerateFrameExit() {
@@ -689,11 +690,11 @@
Location location,
HInstruction* move_for) {
LocationSummary* locations = instruction->GetLocations();
- if (locations != nullptr && locations->Out().Equals(location)) {
+ if (instruction->IsCurrentMethod()) {
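+ // The current method is unconditionally spilled at stack offset 0
+ // on entry, so copy it from there.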
+ Move(location, Location::StackSlot(kCurrentMethodStackOffset));
+ } else if (locations != nullptr && locations->Out().Equals(location)) {
return;
- }
-
- if (locations != nullptr && locations->Out().IsConstant()) {
+ } else if (locations != nullptr && locations->Out().IsConstant()) {
HConstant* const_to_move = locations->Out().GetConstant();
if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
Immediate imm(GetInt32ValueOf(const_to_move));
@@ -1339,7 +1340,7 @@
void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
- locations->AddTemp(Location::RegisterLocation(RDI));
+ locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
InvokeDexCallingConventionVisitorX86_64 calling_convention_visitor;
for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
@@ -3066,9 +3067,20 @@
locations->SetOut(location);
}
-void InstructionCodeGeneratorX86_64::VisitParameterValue(HParameterValue* instruction) {
+void InstructionCodeGeneratorX86_64::VisitParameterValue(
+ HParameterValue* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, the parameter is already at its location.
- UNUSED(instruction);
+}
+
+void LocationsBuilderX86_64::VisitCurrentMethod(HCurrentMethod* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
+}
+
+void InstructionCodeGeneratorX86_64::VisitCurrentMethod(
+ HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
+ // Nothing to do, the method is already at its location.
}
void LocationsBuilderX86_64::VisitNot(HNot* not_) {
@@ -4123,20 +4135,22 @@
: LocationSummary::kNoCall;
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
- CpuRegister out = cls->GetLocations()->Out().AsRegister<CpuRegister>();
+ LocationSummary* locations = cls->GetLocations();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- codegen_->LoadCurrentMethod(out);
- __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ movl(out, Address(current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
DCHECK(cls->CanCallRuntime());
- codegen_->LoadCurrentMethod(out);
- __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
+ __ movl(out, Address(
+ current_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 483c09e..80d4b4a 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -295,6 +295,19 @@
return cached_null_constant_;
}
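+// Lazily creates the HCurrentMethod node and caches it, inserting it
+// at the start of the entry block so that it dominates all of its uses.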
+HCurrentMethod* HGraph::GetCurrentMethod() {
+ if (cached_current_method_ == nullptr) {
+ cached_current_method_ = new (arena_) HCurrentMethod();
+ if (entry_block_->GetFirstInstruction() == nullptr) {
+ entry_block_->AddInstruction(cached_current_method_);
+ } else {
+ entry_block_->InsertInstructionBefore(
+ cached_current_method_, entry_block_->GetFirstInstruction());
+ }
+ }
+ return cached_current_method_;
+}
+
HConstant* HGraph::GetConstant(Primitive::Type type, int64_t value) {
switch (type) {
case Primitive::Type::kPrimBoolean:
@@ -1461,6 +1474,8 @@
DCHECK(parameter_index != last_input_index);
}
current->ReplaceWith(invoke->InputAt(parameter_index++));
+ } else if (current->IsCurrentMethod()) {
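+ // After inlining, the outer method is the current method, so replace
+ // the inlined graph's HCurrentMethod with the outer graph's.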
+ current->ReplaceWith(outer_graph->GetCurrentMethod());
} else {
DCHECK(current->IsGoto() || current->IsSuspendCheck());
entry_block_->RemoveInstruction(current);
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index 01870c3..d542e59 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -35,6 +35,7 @@
class GraphChecker;
class HBasicBlock;
+class HCurrentMethod;
class HDoubleConstant;
class HEnvironment;
class HFloatConstant;
@@ -147,7 +148,8 @@
cached_int_constants_(std::less<int32_t>(), arena->Adapter()),
cached_float_constants_(std::less<int32_t>(), arena->Adapter()),
cached_long_constants_(std::less<int64_t>(), arena->Adapter()),
- cached_double_constants_(std::less<int64_t>(), arena->Adapter()) {}
+ cached_double_constants_(std::less<int64_t>(), arena->Adapter()),
+ cached_current_method_(nullptr) {}
ArenaAllocator* GetArena() const { return arena_; }
const GrowableArray<HBasicBlock*>& GetBlocks() const { return blocks_; }
@@ -278,6 +280,8 @@
return CreateConstant(bit_cast<int64_t, double>(value), &cached_double_constants_);
}
+ HCurrentMethod* GetCurrentMethod();
+
HBasicBlock* FindCommonDominator(HBasicBlock* first, HBasicBlock* second) const;
const DexFile& GetDexFile() const {
@@ -386,6 +390,8 @@
ArenaSafeMap<int64_t, HLongConstant*> cached_long_constants_;
ArenaSafeMap<int64_t, HDoubleConstant*> cached_double_constants_;
+ HCurrentMethod* cached_current_method_;
+
friend class SsaBuilder; // For caching constants.
friend class SsaLivenessAnalysis; // For the linear order.
ART_FRIEND_TEST(GraphTest, IfSuccessorSimpleJoinBlock1);
@@ -811,6 +817,7 @@
M(ClinitCheck, Instruction) \
M(Compare, BinaryOperation) \
M(Condition, BinaryOperation) \
+ M(CurrentMethod, Instruction) \
M(Deoptimize, Instruction) \
M(Div, BinaryOperation) \
M(DivZeroCheck, Instruction) \
@@ -1824,6 +1831,19 @@
DISALLOW_COPY_AND_ASSIGN(HDeoptimize);
};
+// Represents the ArtMethod that was passed as the first argument to
+// the method being compiled. It is used by instructions that depend on
+// it, such as instructions that work with the dex cache.
+class HCurrentMethod : public HExpression<0> {
+ public:
+ HCurrentMethod() : HExpression(Primitive::kPrimNot, SideEffects::None()) {}
+
+ DECLARE_INSTRUCTION(CurrentMethod);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HCurrentMethod);
+};
+
class HUnaryOperation : public HExpression<1> {
public:
HUnaryOperation(Primitive::Type result_type, HInstruction* input)
@@ -3433,9 +3453,10 @@
/**
* Instruction to load a Class object.
*/
-class HLoadClass : public HExpression<0> {
+class HLoadClass : public HExpression<1> {
public:
- HLoadClass(uint16_t type_index,
+ HLoadClass(HCurrentMethod* current_method,
+ uint16_t type_index,
const DexFile& dex_file,
bool is_referrers_class,
uint32_t dex_pc)
@@ -3445,7 +3466,9 @@
is_referrers_class_(is_referrers_class),
dex_pc_(dex_pc),
generate_clinit_check_(false),
- loaded_class_rti_(ReferenceTypeInfo::CreateTop(/* is_exact */ false)) {}
+ loaded_class_rti_(ReferenceTypeInfo::CreateTop(/* is_exact */ false)) {
+ SetRawInputAt(0, current_method);
+ }
bool CanBeMoved() const OVERRIDE { return true; }
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 925099a..d4ff4d8 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -482,8 +482,9 @@
LiveInterval* current = it.CurrentInterval();
HInstruction* defined_by = current->GetParent()->GetDefinedBy();
if (current->GetParent()->HasSpillSlot()
- // Parameters have their own stack slot.
- && !(defined_by != nullptr && defined_by->IsParameterValue())) {
+ // Parameters and the current method have their own stack slots.
+ && !(defined_by != nullptr && (defined_by->IsParameterValue()
+ || defined_by->IsCurrentMethod()))) {
BitVector* liveness_of_spill_slot = liveness_of_values.Get(number_of_registers
+ current->GetParent()->GetSpillSlot() / kVRegSize
- number_of_out_slots);
@@ -1246,6 +1247,11 @@
return;
}
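+ // The current method is unconditionally spilled at stack offset 0 by
+ // the code generators, so reuse that slot instead of allocating one.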
+ if (defined_by->IsCurrentMethod()) {
+ parent->SetSpillSlot(0);
+ return;
+ }
+
if (defined_by->IsConstant()) {
// Constants don't need a spill slot.
return;
@@ -1519,7 +1525,10 @@
void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
LiveInterval* current = interval;
- if (current->HasSpillSlot() && current->HasRegister()) {
+ if (current->HasSpillSlot()
+ && current->HasRegister()
+ // Currently, the code generators unconditionally spill the current method.
+ && !interval->GetDefinedBy()->IsCurrentMethod()) {
// We spill eagerly, so move must be at definition.
InsertMoveAfter(interval->GetDefinedBy(),
interval->ToLocation(),
@@ -1715,6 +1724,9 @@
} else if (current->HasSpillSlot()) {
current->SetSpillSlot(current->GetSpillSlot() + codegen_->GetFrameSize());
}
+ } else if (instruction->IsCurrentMethod()) {
+ // The current method is always at offset 0.
+ DCHECK(!current->HasSpillSlot() || (current->GetSpillSlot() == 0));
} else if (current->HasSpillSlot()) {
// Adjust the stack slot, now that we know the number of them for each type.
// The way this implementation lays out the stack is the following: