Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/builder.cc                |  21
-rw-r--r--  compiler/optimizing/code_generator_arm.cc     |  81
-rw-r--r--  compiler/optimizing/code_generator_arm64.cc   |  44
-rw-r--r--  compiler/optimizing/code_generator_x86.cc     |  62
-rw-r--r--  compiler/optimizing/code_generator_x86_64.cc  |  68
-rw-r--r--  compiler/optimizing/inliner.cc                |   4
-rw-r--r--  compiler/optimizing/intrinsics.cc             |   1
-rw-r--r--  compiler/optimizing/nodes.cc                  |  16
-rw-r--r--  compiler/optimizing/nodes.h                   |  39
-rw-r--r--  compiler/optimizing/optimizing_compiler.cc    |   6
-rw-r--r--  compiler/optimizing/parallel_move_resolver.h  |   1
-rw-r--r--  compiler/optimizing/register_allocator.cc     |  18
-rw-r--r--  compiler/optimizing/register_allocator.h      |   1
13 files changed, 246 insertions, 116 deletions
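
In outline, the hunks below introduce an explicit HCurrentMethod node in the optimizing IR: the graph builder threads it into HLoadClass and HLoadString as input 0, each backend reads the method from that input instead of reloading it via codegen_->LoadCurrentMethod(), and the register allocator pins it to stack slot 0. A minimal sketch of the new builder-side pattern, assembled only from calls that appear in this diff:

  // HGraph::GetCurrentMethod() (nodes.cc) lazily creates one HCurrentMethod
  // at the top of the entry block and caches it, so all users share it.
  HCurrentMethod* method = graph_->GetCurrentMethod();

  // HLoadClass is now HExpression<1>: the current method is its input 0 and
  // becomes a real register input in the code generators.
  HLoadClass* cls = new (arena_) HLoadClass(method,
                                            type_index,
                                            *dex_compilation_unit_->GetDexFile(),
                                            IsOutermostCompilingClass(type_index),
                                            dex_pc);
  current_block_->AddInstruction(cls);
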
diff --git a/compiler/optimizing/builder.cc b/compiler/optimizing/builder.cc
index edce9487f8..a17b578e4c 100644
--- a/compiler/optimizing/builder.cc
+++ b/compiler/optimizing/builder.cc
@@ -712,7 +712,11 @@ bool HGraphBuilder::BuildInvoke(const Instruction& instruction,
} else {
clinit_check_requirement = HInvokeStaticOrDirect::ClinitCheckRequirement::kExplicit;
HLoadClass* load_class = new (arena_) HLoadClass(
- storage_index, *dex_compilation_unit_->GetDexFile(), is_referrer_class, dex_pc);
+ graph_->GetCurrentMethod(),
+ storage_index,
+ *dex_compilation_unit_->GetDexFile(),
+ is_referrer_class,
+ dex_pc);
current_block_->AddInstruction(load_class);
clinit_check = new (arena_) HClinitCheck(load_class, dex_pc);
current_block_->AddInstruction(clinit_check);
@@ -919,8 +923,11 @@ bool HGraphBuilder::BuildStaticFieldAccess(const Instruction& instruction,
*outer_compilation_unit_->GetDexFile(), storage_index);
bool is_initialized = resolved_field->GetDeclaringClass()->IsInitialized() && is_in_dex_cache;
- HLoadClass* constant = new (arena_) HLoadClass(
- storage_index, *dex_compilation_unit_->GetDexFile(), is_referrer_class, dex_pc);
+ HLoadClass* constant = new (arena_) HLoadClass(graph_->GetCurrentMethod(),
+ storage_index,
+ *dex_compilation_unit_->GetDexFile(),
+ is_referrer_class,
+ dex_pc);
current_block_->AddInstruction(constant);
HInstruction* cls = constant;
@@ -1167,6 +1174,7 @@ bool HGraphBuilder::BuildTypeCheck(const Instruction& instruction,
}
HInstruction* object = LoadLocal(reference, Primitive::kPrimNot);
HLoadClass* cls = new (arena_) HLoadClass(
+ graph_->GetCurrentMethod(),
type_index,
*dex_compilation_unit_->GetDexFile(),
IsOutermostCompilingClass(type_index),
@@ -2154,13 +2162,15 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
}
case Instruction::CONST_STRING: {
- current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_21c(), dex_pc));
+ current_block_->AddInstruction(
+ new (arena_) HLoadString(graph_->GetCurrentMethod(), instruction.VRegB_21c(), dex_pc));
UpdateLocal(instruction.VRegA_21c(), current_block_->GetLastInstruction());
break;
}
case Instruction::CONST_STRING_JUMBO: {
- current_block_->AddInstruction(new (arena_) HLoadString(instruction.VRegB_31c(), dex_pc));
+ current_block_->AddInstruction(
+ new (arena_) HLoadString(graph_->GetCurrentMethod(), instruction.VRegB_31c(), dex_pc));
UpdateLocal(instruction.VRegA_31c(), current_block_->GetLastInstruction());
break;
}
@@ -2182,6 +2192,7 @@ bool HGraphBuilder::AnalyzeDexInstruction(const Instruction& instruction, uint32
return false;
}
current_block_->AddInstruction(new (arena_) HLoadClass(
+ graph_->GetCurrentMethod(),
type_index,
*dex_compilation_unit_->GetDexFile(),
IsOutermostCompilingClass(type_index),
diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index 09ed9c700e..bd1f134c32 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -41,6 +41,7 @@ static bool ExpectedPairLayout(Location location) {
}
static constexpr int kCurrentMethodStackOffset = 0;
+static constexpr Register kMethodRegisterArgument = R0;
// We unconditionally allocate R5 to ensure we can do long operations
// with baseline.
@@ -494,11 +495,6 @@ InstructionCodeGeneratorARM::InstructionCodeGeneratorARM(HGraph* graph, CodeGene
assembler_(codegen->GetAssembler()),
codegen_(codegen) {}
-static uint32_t LeastSignificantBit(uint32_t mask) {
- // ffs starts at 1.
- return ffs(mask) - 1;
-}
-
void CodeGeneratorARM::ComputeSpillMask() {
core_spill_mask_ = allocated_registers_.GetCoreRegisters() & core_callee_save_mask_;
// Save one extra register for baseline. Note that on thumb2, there is no easy
@@ -549,7 +545,7 @@ void CodeGeneratorARM::GenerateFrameEntry() {
uint32_t push_mask = (core_spill_mask_ & (~(1 << PC))) | 1 << LR;
__ PushList(push_mask);
__ cfi().AdjustCFAOffset(kArmWordSize * POPCOUNT(push_mask));
- __ cfi().RelOffsetForMany(DWARFReg(R0), 0, push_mask, kArmWordSize);
+ __ cfi().RelOffsetForMany(DWARFReg(kMethodRegisterArgument), 0, push_mask, kArmWordSize);
if (fpu_spill_mask_ != 0) {
SRegister start_register = SRegister(LeastSignificantBit(fpu_spill_mask_));
__ vpushs(start_register, POPCOUNT(fpu_spill_mask_));
@@ -559,7 +555,7 @@ void CodeGeneratorARM::GenerateFrameEntry() {
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ AddConstant(SP, -adjust);
__ cfi().AdjustCFAOffset(adjust);
- __ StoreToOffset(kStoreWord, R0, SP, 0);
+ __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
}
void CodeGeneratorARM::GenerateFrameExit() {
@@ -808,11 +804,11 @@ void CodeGeneratorARM::Move64(Location destination, Location source) {
void CodeGeneratorARM::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
LocationSummary* locations = instruction->GetLocations();
- if (locations != nullptr && locations->Out().Equals(location)) {
+ if (instruction->IsCurrentMethod()) {
+ Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
+ } else if (locations != nullptr && locations->Out().Equals(location)) {
return;
- }
-
- if (locations != nullptr && locations->Out().IsConstant()) {
+ } else if (locations != nullptr && locations->Out().IsConstant()) {
HConstant* const_to_move = locations->Out().GetConstant();
if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
int32_t value = GetInt32ValueOf(const_to_move);
@@ -1033,19 +1029,19 @@ void InstructionCodeGeneratorARM::VisitDeoptimize(HDeoptimize* deoptimize) {
GenerateTestAndBranch(deoptimize, slow_path_entry, nullptr, slow_path_entry);
}
-void LocationsBuilderARM::VisitCondition(HCondition* comp) {
+void LocationsBuilderARM::VisitCondition(HCondition* cond) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(comp, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
- locations->SetInAt(1, Location::RegisterOrConstant(comp->InputAt(1)));
- if (comp->NeedsMaterialization()) {
+ locations->SetInAt(1, Location::RegisterOrConstant(cond->InputAt(1)));
+ if (cond->NeedsMaterialization()) {
locations->SetOut(Location::RequiresRegister(), Location::kNoOutputOverlap);
}
}
-void InstructionCodeGeneratorARM::VisitCondition(HCondition* comp) {
- if (!comp->NeedsMaterialization()) return;
- LocationSummary* locations = comp->GetLocations();
+void InstructionCodeGeneratorARM::VisitCondition(HCondition* cond) {
+ if (!cond->NeedsMaterialization()) return;
+ LocationSummary* locations = cond->GetLocations();
Register left = locations->InAt(0).AsRegister<Register>();
if (locations->InAt(1).IsRegister()) {
@@ -1062,11 +1058,11 @@ void InstructionCodeGeneratorARM::VisitCondition(HCondition* comp) {
__ cmp(left, ShifterOperand(temp));
}
}
- __ it(ARMCondition(comp->GetCondition()), kItElse);
+ __ it(ARMCondition(cond->GetCondition()), kItElse);
__ mov(locations->Out().AsRegister<Register>(), ShifterOperand(1),
- ARMCondition(comp->GetCondition()));
+ ARMCondition(cond->GetCondition()));
__ mov(locations->Out().AsRegister<Register>(), ShifterOperand(0),
- ARMOppositeCondition(comp->GetCondition()));
+ ARMOppositeCondition(cond->GetCondition()));
}
void LocationsBuilderARM::VisitEqual(HEqual* comp) {
@@ -1291,7 +1287,7 @@ void InstructionCodeGeneratorARM::VisitInvokeStaticOrDirect(HInvokeStaticOrDirec
void LocationsBuilderARM::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
- locations->AddTemp(Location::RegisterLocation(R0));
+ locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
InvokeDexCallingConventionVisitorARM calling_convention_visitor;
for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
@@ -2222,7 +2218,7 @@ void InstructionCodeGeneratorARM::DivRemByPowerOfTwo(HBinaryOperation* instructi
Register dividend = locations->InAt(0).AsRegister<Register>();
Register temp = locations->GetTemp(0).AsRegister<Register>();
int32_t imm = second.GetConstant()->AsIntConstant()->GetValue();
- int32_t abs_imm = std::abs(imm);
+ uint32_t abs_imm = static_cast<uint32_t>(std::abs(imm));
DCHECK(IsPowerOfTwo(abs_imm));
int ctz_imm = CTZ(abs_imm);
@@ -2807,9 +2803,19 @@ void LocationsBuilderARM::VisitParameterValue(HParameterValue* instruction) {
locations->SetOut(location);
}
-void InstructionCodeGeneratorARM::VisitParameterValue(HParameterValue* instruction) {
+void InstructionCodeGeneratorARM::VisitParameterValue(
+ HParameterValue* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, the parameter is already at its location.
- UNUSED(instruction);
+}
+
+void LocationsBuilderARM::VisitCurrentMethod(HCurrentMethod* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
+}
+
+void InstructionCodeGeneratorARM::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
+ // Nothing to do, the method is already at its location.
}
void LocationsBuilderARM::VisitNot(HNot* not_) {
@@ -3959,21 +3965,25 @@ void LocationsBuilderARM::VisitLoadClass(HLoadClass* cls) {
: LocationSummary::kNoCall;
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorARM::VisitLoadClass(HLoadClass* cls) {
- Register out = cls->GetLocations()->Out().AsRegister<Register>();
+ LocationSummary* locations = cls->GetLocations();
+ Register out = locations->Out().AsRegister<Register>();
+ Register current_method = locations->InAt(0).AsRegister<Register>();
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- codegen_->LoadCurrentMethod(out);
- __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
+ __ LoadFromOffset(
+ kLoadWord, out, current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
} else {
DCHECK(cls->CanCallRuntime());
- codegen_->LoadCurrentMethod(out);
- __ LoadFromOffset(
- kLoadWord, out, out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
+ __ LoadFromOffset(kLoadWord,
+ out,
+ current_method,
+ mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value());
__ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex()));
SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM(
@@ -4021,6 +4031,7 @@ void InstructionCodeGeneratorARM::GenerateClassInitializationCheck(
void LocationsBuilderARM::VisitLoadString(HLoadString* load) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -4028,9 +4039,11 @@ void InstructionCodeGeneratorARM::VisitLoadString(HLoadString* load) {
SlowPathCodeARM* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathARM(load);
codegen_->AddSlowPath(slow_path);
- Register out = load->GetLocations()->Out().AsRegister<Register>();
- codegen_->LoadCurrentMethod(out);
- __ LoadFromOffset(kLoadWord, out, out, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
+ LocationSummary* locations = load->GetLocations();
+ Register out = locations->Out().AsRegister<Register>();
+ Register current_method = locations->InAt(0).AsRegister<Register>();
+ __ LoadFromOffset(
+ kLoadWord, out, current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value());
__ LoadFromOffset(kLoadWord, out, out, mirror::Class::DexCacheStringsOffset().Int32Value());
__ LoadFromOffset(kLoadWord, out, out, CodeGenerator::GetCacheOffset(load->GetStringIndex()));
__ cmp(out, ShifterOperand(0));
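
The ARM hunks above establish the template the other backends repeat below with their own method register (EAX, RDI, x0): name the argument register once, keep spilling it to stack offset 0 in the frame entry, and make HCurrentMethod itself cost nothing. A condensed sketch of the per-backend obligations, using the ARM names from this file:

  // 1) The frame entry stores the incoming method to its canonical slot:
  //      __ StoreToOffset(kStoreWord, kMethodRegisterArgument, SP, 0);
  // 2) VisitCurrentMethod is a pure location constraint; the value is already
  //    live in the argument register on entry, so no instruction is emitted:
  locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
  // 3) If that register has since been clobbered, Move() serves the request
  //    from the canonical slot instead:
  Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
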
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index b6d99abca0..cf5a8fb605 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -634,16 +634,16 @@ void CodeGeneratorARM64::Move(HInstruction* instruction,
Location location,
HInstruction* move_for) {
LocationSummary* locations = instruction->GetLocations();
- if (locations != nullptr && locations->Out().Equals(location)) {
- return;
- }
-
Primitive::Type type = instruction->GetType();
DCHECK_NE(type, Primitive::kPrimVoid);
- if (instruction->IsIntConstant()
- || instruction->IsLongConstant()
- || instruction->IsNullConstant()) {
+ if (instruction->IsCurrentMethod()) {
+ MoveLocation(location, Location::StackSlot(kCurrentMethodStackOffset));
+ } else if (locations != nullptr && locations->Out().Equals(location)) {
+ return;
+ } else if (instruction->IsIntConstant()
+ || instruction->IsLongConstant()
+ || instruction->IsNullConstant()) {
int64_t value = GetInt64ValueOf(instruction->AsConstant());
if (location.IsRegister()) {
Register dst = RegisterFrom(location, type);
@@ -1738,7 +1738,7 @@ void InstructionCodeGeneratorARM64::DivRemByPowerOfTwo(HBinaryOperation* instruc
Register out = OutputRegister(instruction);
Register dividend = InputRegisterAt(instruction, 0);
int64_t imm = Int64FromConstant(second.GetConstant());
- int64_t abs_imm = std::abs(imm);
+ uint64_t abs_imm = static_cast<uint64_t>(std::abs(imm));
DCHECK(IsPowerOfTwo(abs_imm));
int ctz_imm = CTZ(abs_imm);
@@ -2345,20 +2345,20 @@ void LocationsBuilderARM64::VisitLoadClass(HLoadClass* cls) {
LocationSummary::CallKind call_kind = cls->CanCallRuntime() ? LocationSummary::kCallOnSlowPath
: LocationSummary::kNoCall;
LocationSummary* locations = new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorARM64::VisitLoadClass(HLoadClass* cls) {
Register out = OutputRegister(cls);
+ Register current_method = InputRegisterAt(cls, 0);
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- codegen_->LoadCurrentMethod(out);
- __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
+ __ Ldr(out, HeapOperand(current_method, mirror::ArtMethod::DeclaringClassOffset()));
} else {
DCHECK(cls->CanCallRuntime());
- codegen_->LoadCurrentMethod(out);
- __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
+ __ Ldr(out, HeapOperand(current_method, mirror::ArtMethod::DexCacheResolvedTypesOffset()));
__ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
SlowPathCodeARM64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathARM64(
@@ -2397,6 +2397,7 @@ void InstructionCodeGeneratorARM64::VisitLoadLocal(HLoadLocal* load) {
void LocationsBuilderARM64::VisitLoadString(HLoadString* load) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -2405,8 +2406,8 @@ void InstructionCodeGeneratorARM64::VisitLoadString(HLoadString* load) {
codegen_->AddSlowPath(slow_path);
Register out = OutputRegister(load);
- codegen_->LoadCurrentMethod(out);
- __ Ldr(out, HeapOperand(out, mirror::ArtMethod::DeclaringClassOffset()));
+ Register current_method = InputRegisterAt(load, 0);
+ __ Ldr(out, HeapOperand(current_method, mirror::ArtMethod::DeclaringClassOffset()));
__ Ldr(out, HeapOperand(out, mirror::Class::DexCacheStringsOffset()));
__ Ldr(out, HeapOperand(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
__ Cbz(out, slow_path->GetEntryLabel());
@@ -2674,9 +2675,20 @@ void LocationsBuilderARM64::VisitParameterValue(HParameterValue* instruction) {
locations->SetOut(location);
}
-void InstructionCodeGeneratorARM64::VisitParameterValue(HParameterValue* instruction) {
+void InstructionCodeGeneratorARM64::VisitParameterValue(
+ HParameterValue* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, the parameter is already at its location.
- UNUSED(instruction);
+}
+
+void LocationsBuilderARM64::VisitCurrentMethod(HCurrentMethod* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(LocationFrom(x0));
+}
+
+void InstructionCodeGeneratorARM64::VisitCurrentMethod(
+ HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
+ // Nothing to do, the method is already at its location.
}
void LocationsBuilderARM64::VisitPhi(HPhi* instruction) {
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index a6f01dad38..81c3526b35 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -36,6 +36,7 @@ namespace art {
namespace x86 {
static constexpr int kCurrentMethodStackOffset = 0;
+static constexpr Register kMethodRegisterArgument = EAX;
static constexpr Register kCoreCalleeSaves[] = { EBP, ESI, EDI };
@@ -498,7 +499,7 @@ void CodeGeneratorX86::GenerateFrameEntry() {
int adjust = GetFrameSize() - FrameEntrySpillSize();
__ subl(ESP, Immediate(adjust));
__ cfi().AdjustCFAOffset(adjust);
- __ movl(Address(ESP, kCurrentMethodStackOffset), EAX);
+ __ movl(Address(ESP, kCurrentMethodStackOffset), kMethodRegisterArgument);
}
void CodeGeneratorX86::GenerateFrameExit() {
@@ -717,11 +718,11 @@ void CodeGeneratorX86::Move64(Location destination, Location source) {
void CodeGeneratorX86::Move(HInstruction* instruction, Location location, HInstruction* move_for) {
LocationSummary* locations = instruction->GetLocations();
- if (locations != nullptr && locations->Out().Equals(location)) {
+ if (instruction->IsCurrentMethod()) {
+ Move32(location, Location::StackSlot(kCurrentMethodStackOffset));
+ } else if (locations != nullptr && locations->Out().Equals(location)) {
return;
- }
-
- if (locations != nullptr && locations->Out().IsConstant()) {
+ } else if (locations != nullptr && locations->Out().IsConstant()) {
HConstant* const_to_move = locations->Out().GetConstant();
if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
Immediate imm(GetInt32ValueOf(const_to_move));
@@ -983,20 +984,20 @@ void InstructionCodeGeneratorX86::VisitStoreLocal(HStoreLocal* store) {
UNUSED(store);
}
-void LocationsBuilderX86::VisitCondition(HCondition* comp) {
+void LocationsBuilderX86::VisitCondition(HCondition* cond) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(comp, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
- if (comp->NeedsMaterialization()) {
+ if (cond->NeedsMaterialization()) {
// We need a byte register.
locations->SetOut(Location::RegisterLocation(ECX));
}
}
-void InstructionCodeGeneratorX86::VisitCondition(HCondition* comp) {
- if (comp->NeedsMaterialization()) {
- LocationSummary* locations = comp->GetLocations();
+void InstructionCodeGeneratorX86::VisitCondition(HCondition* cond) {
+ if (cond->NeedsMaterialization()) {
+ LocationSummary* locations = cond->GetLocations();
Register reg = locations->Out().AsRegister<Register>();
// Clear register: setcc only sets the low byte.
__ xorl(reg, reg);
@@ -1014,7 +1015,7 @@ void InstructionCodeGeneratorX86::VisitCondition(HCondition* comp) {
} else {
__ cmpl(lhs.AsRegister<Register>(), Address(ESP, rhs.GetStackIndex()));
}
- __ setb(X86Condition(comp->GetCondition()), reg);
+ __ setb(X86Condition(cond->GetCondition()), reg);
}
}
@@ -1239,7 +1240,7 @@ void LocationsBuilderX86::VisitInvokeVirtual(HInvokeVirtual* invoke) {
void LocationsBuilderX86::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
- locations->AddTemp(Location::RegisterLocation(EAX));
+ locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
InvokeDexCallingConventionVisitorX86 calling_convention_visitor;
for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
@@ -1959,6 +1960,8 @@ void InstructionCodeGeneratorX86::VisitAdd(HAdd* add) {
if (second.IsRegister()) {
if (out.AsRegister<Register>() == first.AsRegister<Register>()) {
__ addl(out.AsRegister<Register>(), second.AsRegister<Register>());
+ } else if (out.AsRegister<Register>() == second.AsRegister<Register>()) {
+ __ addl(out.AsRegister<Register>(), first.AsRegister<Register>());
} else {
__ leal(out.AsRegister<Register>(), Address(
first.AsRegister<Register>(), second.AsRegister<Register>(), TIMES_1, 0));
@@ -3010,8 +3013,17 @@ void LocationsBuilderX86::VisitParameterValue(HParameterValue* instruction) {
locations->SetOut(location);
}
-void InstructionCodeGeneratorX86::VisitParameterValue(HParameterValue* instruction) {
- UNUSED(instruction);
+void InstructionCodeGeneratorX86::VisitParameterValue(
+ HParameterValue* instruction ATTRIBUTE_UNUSED) {
+}
+
+void LocationsBuilderX86::VisitCurrentMethod(HCurrentMethod* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
+}
+
+void InstructionCodeGeneratorX86::VisitCurrentMethod(HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
}
void LocationsBuilderX86::VisitNot(HNot* not_) {
@@ -4279,20 +4291,22 @@ void LocationsBuilderX86::VisitLoadClass(HLoadClass* cls) {
: LocationSummary::kNoCall;
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorX86::VisitLoadClass(HLoadClass* cls) {
- Register out = cls->GetLocations()->Out().AsRegister<Register>();
+ LocationSummary* locations = cls->GetLocations();
+ Register out = locations->Out().AsRegister<Register>();
+ Register current_method = locations->InAt(0).AsRegister<Register>();
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- codegen_->LoadCurrentMethod(out);
- __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ movl(out, Address(current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
DCHECK(cls->CanCallRuntime());
- codegen_->LoadCurrentMethod(out);
- __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
+ __ movl(out, Address(
+ current_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86(
@@ -4338,6 +4352,7 @@ void InstructionCodeGeneratorX86::GenerateClassInitializationCheck(
void LocationsBuilderX86::VisitLoadString(HLoadString* load) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -4345,9 +4360,10 @@ void InstructionCodeGeneratorX86::VisitLoadString(HLoadString* load) {
SlowPathCodeX86* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86(load);
codegen_->AddSlowPath(slow_path);
- Register out = load->GetLocations()->Out().AsRegister<Register>();
- codegen_->LoadCurrentMethod(out);
- __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ LocationSummary* locations = load->GetLocations();
+ Register out = locations->Out().AsRegister<Register>();
+ Register current_method = locations->InAt(0).AsRegister<Register>();
+ __ movl(out, Address(current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
__ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
__ testl(out, out);
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index f49c26db2b..f8125c64e1 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -39,13 +39,13 @@ namespace x86_64 {
static constexpr Register TMP = R11;
static constexpr int kCurrentMethodStackOffset = 0;
+static constexpr Register kMethodRegisterArgument = RDI;
static constexpr Register kCoreCalleeSaves[] = { RBX, RBP, R12, R13, R14, R15 };
static constexpr FloatRegister kFpuCalleeSaves[] = { XMM12, XMM13, XMM14, XMM15 };
static constexpr int kC2ConditionMask = 0x400;
-
#define __ reinterpret_cast<X86_64Assembler*>(codegen->GetAssembler())->
class NullCheckSlowPathX86_64 : public SlowPathCodeX86_64 {
@@ -545,7 +545,8 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
}
}
- __ movl(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI));
+ __ movl(Address(CpuRegister(RSP), kCurrentMethodStackOffset),
+ CpuRegister(kMethodRegisterArgument));
}
void CodeGeneratorX86_64::GenerateFrameExit() {
@@ -689,11 +690,11 @@ void CodeGeneratorX86_64::Move(HInstruction* instruction,
Location location,
HInstruction* move_for) {
LocationSummary* locations = instruction->GetLocations();
- if (locations != nullptr && locations->Out().Equals(location)) {
+ if (instruction->IsCurrentMethod()) {
+ Move(location, Location::StackSlot(kCurrentMethodStackOffset));
+ } else if (locations != nullptr && locations->Out().Equals(location)) {
return;
- }
-
- if (locations != nullptr && locations->Out().IsConstant()) {
+ } else if (locations != nullptr && locations->Out().IsConstant()) {
HConstant* const_to_move = locations->Out().GetConstant();
if (const_to_move->IsIntConstant() || const_to_move->IsNullConstant()) {
Immediate imm(GetInt32ValueOf(const_to_move));
@@ -944,19 +945,19 @@ void InstructionCodeGeneratorX86_64::VisitStoreLocal(HStoreLocal* store) {
UNUSED(store);
}
-void LocationsBuilderX86_64::VisitCondition(HCondition* comp) {
+void LocationsBuilderX86_64::VisitCondition(HCondition* cond) {
LocationSummary* locations =
- new (GetGraph()->GetArena()) LocationSummary(comp, LocationSummary::kNoCall);
+ new (GetGraph()->GetArena()) LocationSummary(cond, LocationSummary::kNoCall);
locations->SetInAt(0, Location::RequiresRegister());
locations->SetInAt(1, Location::Any());
- if (comp->NeedsMaterialization()) {
+ if (cond->NeedsMaterialization()) {
locations->SetOut(Location::RequiresRegister());
}
}
-void InstructionCodeGeneratorX86_64::VisitCondition(HCondition* comp) {
- if (comp->NeedsMaterialization()) {
- LocationSummary* locations = comp->GetLocations();
+void InstructionCodeGeneratorX86_64::VisitCondition(HCondition* cond) {
+ if (cond->NeedsMaterialization()) {
+ LocationSummary* locations = cond->GetLocations();
CpuRegister reg = locations->Out().AsRegister<CpuRegister>();
// Clear register: setcc only sets the low byte.
__ xorl(reg, reg);
@@ -974,7 +975,7 @@ void InstructionCodeGeneratorX86_64::VisitCondition(HCondition* comp) {
} else {
__ cmpl(lhs.AsRegister<CpuRegister>(), Address(CpuRegister(RSP), rhs.GetStackIndex()));
}
- __ setcc(X86_64Condition(comp->GetCondition()), reg);
+ __ setcc(X86_64Condition(cond->GetCondition()), reg);
}
}
@@ -1339,7 +1340,7 @@ void InstructionCodeGeneratorX86_64::VisitInvokeStaticOrDirect(HInvokeStaticOrDi
void LocationsBuilderX86_64::HandleInvoke(HInvoke* invoke) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(invoke, LocationSummary::kCall);
- locations->AddTemp(Location::RegisterLocation(RDI));
+ locations->AddTemp(Location::RegisterLocation(kMethodRegisterArgument));
InvokeDexCallingConventionVisitorX86_64 calling_convention_visitor;
for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
@@ -2117,6 +2118,8 @@ void InstructionCodeGeneratorX86_64::VisitAdd(HAdd* add) {
if (second.IsRegister()) {
if (out.AsRegister<Register>() == first.AsRegister<Register>()) {
__ addl(out.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ } else if (out.AsRegister<Register>() == second.AsRegister<Register>()) {
+ __ addl(out.AsRegister<CpuRegister>(), first.AsRegister<CpuRegister>());
} else {
__ leal(out.AsRegister<CpuRegister>(), Address(
first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>(), TIMES_1, 0));
@@ -2140,6 +2143,8 @@ void InstructionCodeGeneratorX86_64::VisitAdd(HAdd* add) {
if (second.IsRegister()) {
if (out.AsRegister<Register>() == first.AsRegister<Register>()) {
__ addq(out.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>());
+ } else if (out.AsRegister<Register>() == second.AsRegister<Register>()) {
+ __ addq(out.AsRegister<CpuRegister>(), first.AsRegister<CpuRegister>());
} else {
__ leaq(out.AsRegister<CpuRegister>(), Address(
first.AsRegister<CpuRegister>(), second.AsRegister<CpuRegister>(), TIMES_1, 0));
@@ -3066,9 +3071,20 @@ void LocationsBuilderX86_64::VisitParameterValue(HParameterValue* instruction) {
locations->SetOut(location);
}
-void InstructionCodeGeneratorX86_64::VisitParameterValue(HParameterValue* instruction) {
+void InstructionCodeGeneratorX86_64::VisitParameterValue(
+ HParameterValue* instruction ATTRIBUTE_UNUSED) {
// Nothing to do, the parameter is already at its location.
- UNUSED(instruction);
+}
+
+void LocationsBuilderX86_64::VisitCurrentMethod(HCurrentMethod* instruction) {
+ LocationSummary* locations =
+ new (GetGraph()->GetArena()) LocationSummary(instruction, LocationSummary::kNoCall);
+ locations->SetOut(Location::RegisterLocation(kMethodRegisterArgument));
+}
+
+void InstructionCodeGeneratorX86_64::VisitCurrentMethod(
+ HCurrentMethod* instruction ATTRIBUTE_UNUSED) {
+ // Nothing to do, the method is already at its location.
}
void LocationsBuilderX86_64::VisitNot(HNot* not_) {
@@ -4123,20 +4139,22 @@ void LocationsBuilderX86_64::VisitLoadClass(HLoadClass* cls) {
: LocationSummary::kNoCall;
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(cls, call_kind);
+ locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
void InstructionCodeGeneratorX86_64::VisitLoadClass(HLoadClass* cls) {
- CpuRegister out = cls->GetLocations()->Out().AsRegister<CpuRegister>();
+ LocationSummary* locations = cls->GetLocations();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
if (cls->IsReferrersClass()) {
DCHECK(!cls->CanCallRuntime());
DCHECK(!cls->MustGenerateClinitCheck());
- codegen_->LoadCurrentMethod(out);
- __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ __ movl(out, Address(current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
} else {
DCHECK(cls->CanCallRuntime());
- codegen_->LoadCurrentMethod(out);
- __ movl(out, Address(out, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
+ __ movl(out, Address(
+ current_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(cls->GetTypeIndex())));
SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadClassSlowPathX86_64(
cls, cls, cls->GetDexPc(), cls->MustGenerateClinitCheck());
@@ -4172,6 +4190,7 @@ void InstructionCodeGeneratorX86_64::VisitClinitCheck(HClinitCheck* check) {
void LocationsBuilderX86_64::VisitLoadString(HLoadString* load) {
LocationSummary* locations =
new (GetGraph()->GetArena()) LocationSummary(load, LocationSummary::kCallOnSlowPath);
+ locations->SetInAt(0, Location::RequiresRegister());
locations->SetOut(Location::RequiresRegister());
}
@@ -4179,9 +4198,10 @@ void InstructionCodeGeneratorX86_64::VisitLoadString(HLoadString* load) {
SlowPathCodeX86_64* slow_path = new (GetGraph()->GetArena()) LoadStringSlowPathX86_64(load);
codegen_->AddSlowPath(slow_path);
- CpuRegister out = load->GetLocations()->Out().AsRegister<CpuRegister>();
- codegen_->LoadCurrentMethod(CpuRegister(out));
- __ movl(out, Address(out, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
+ LocationSummary* locations = load->GetLocations();
+ CpuRegister out = locations->Out().AsRegister<CpuRegister>();
+ CpuRegister current_method = locations->InAt(0).AsRegister<CpuRegister>();
+ __ movl(out, Address(current_method, mirror::ArtMethod::DeclaringClassOffset().Int32Value()));
__ movl(out, Address(out, mirror::Class::DexCacheStringsOffset().Int32Value()));
__ movl(out, Address(out, CodeGenerator::GetCacheOffset(load->GetStringIndex())));
__ testl(out, out);
diff --git a/compiler/optimizing/inliner.cc b/compiler/optimizing/inliner.cc
index a72817fade..997f980f45 100644
--- a/compiler/optimizing/inliner.cc
+++ b/compiler/optimizing/inliner.cc
@@ -207,7 +207,9 @@ bool HInliner::TryBuildAndInline(Handle<mirror::ArtMethod> resolved_method,
if (!builder.BuildGraph(*code_item)) {
VLOG(compiler) << "Method " << PrettyMethod(method_index, caller_dex_file)
<< " could not be built, so cannot be inlined";
- resolved_method->SetShouldNotInline();
+ // There could be multiple reasons why the graph could not be built, including
+ inaccessible methods/fields due to using a different dex cache. We do not mark
+ // the method as non-inlineable so that other callers can still try to inline it.
return false;
}
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 4e3436e32b..8ef13e125e 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -22,6 +22,7 @@
#include "invoke_type.h"
#include "nodes.h"
#include "quick/inline_method_analyser.h"
+#include "utils.h"
namespace art {
diff --git a/compiler/optimizing/nodes.cc b/compiler/optimizing/nodes.cc
index 2ece5a559c..80d4b4a863 100644
--- a/compiler/optimizing/nodes.cc
+++ b/compiler/optimizing/nodes.cc
@@ -19,6 +19,7 @@
#include "code_generator.h"
#include "ssa_builder.h"
#include "base/bit_vector-inl.h"
+#include "base/bit_utils.h"
#include "utils/growable_array.h"
#include "scoped_thread_state_change.h"
@@ -294,6 +295,19 @@ HNullConstant* HGraph::GetNullConstant() {
return cached_null_constant_;
}
+HCurrentMethod* HGraph::GetCurrentMethod() {
+ if (cached_current_method_ == nullptr) {
+ cached_current_method_ = new (arena_) HCurrentMethod();
+ if (entry_block_->GetFirstInstruction() == nullptr) {
+ entry_block_->AddInstruction(cached_current_method_);
+ } else {
+ entry_block_->InsertInstructionBefore(
+ cached_current_method_, entry_block_->GetFirstInstruction());
+ }
+ }
+ return cached_current_method_;
+}
+
HConstant* HGraph::GetConstant(Primitive::Type type, int64_t value) {
switch (type) {
case Primitive::Type::kPrimBoolean:
@@ -1460,6 +1474,8 @@ void HGraph::InlineInto(HGraph* outer_graph, HInvoke* invoke) {
DCHECK(parameter_index != last_input_index);
}
current->ReplaceWith(invoke->InputAt(parameter_index++));
+ } else if (current->IsCurrentMethod()) {
+ current->ReplaceWith(outer_graph->GetCurrentMethod());
} else {
DCHECK(current->IsGoto() || current->IsSuspendCheck());
entry_block_->RemoveInstruction(current);
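
Because GetCurrentMethod() is the node's only producer, the InlineInto() hunk above stays simple: a callee graph contains at most one (cached) HCurrentMethod, so a single ReplaceWith() rewires every user to the outer graph's instance. From inside the graph builder, the caching contract looks like this (the index and pc values are placeholders, not taken from this diff):

  // Repeated queries return the same node; 42 and 0 stand in for a real
  // string index and dex pc.
  HCurrentMethod* method = graph_->GetCurrentMethod();
  HLoadString* load =
      new (arena_) HLoadString(method, /* string_index */ 42, /* dex_pc */ 0);
  current_block_->AddInstruction(load);
  DCHECK_EQ(method, graph_->GetCurrentMethod());  // lazily created, then reused
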
diff --git a/compiler/optimizing/nodes.h b/compiler/optimizing/nodes.h
index d9d09aafa2..944568dfc2 100644
--- a/compiler/optimizing/nodes.h
+++ b/compiler/optimizing/nodes.h
@@ -35,6 +35,7 @@ namespace art {
class GraphChecker;
class HBasicBlock;
+class HCurrentMethod;
class HDoubleConstant;
class HEnvironment;
class HFloatConstant;
@@ -149,7 +150,8 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
cached_int_constants_(std::less<int32_t>(), arena->Adapter()),
cached_float_constants_(std::less<int32_t>(), arena->Adapter()),
cached_long_constants_(std::less<int64_t>(), arena->Adapter()),
- cached_double_constants_(std::less<int64_t>(), arena->Adapter()) {}
+ cached_double_constants_(std::less<int64_t>(), arena->Adapter()),
+ cached_current_method_(nullptr) {}
ArenaAllocator* GetArena() const { return arena_; }
const GrowableArray<HBasicBlock*>& GetBlocks() const { return blocks_; }
@@ -280,6 +282,8 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
return CreateConstant(bit_cast<int64_t, double>(value), &cached_double_constants_);
}
+ HCurrentMethod* GetCurrentMethod();
+
HBasicBlock* FindCommonDominator(HBasicBlock* first, HBasicBlock* second) const;
const DexFile& GetDexFile() const {
@@ -388,6 +392,8 @@ class HGraph : public ArenaObject<kArenaAllocMisc> {
ArenaSafeMap<int64_t, HLongConstant*> cached_long_constants_;
ArenaSafeMap<int64_t, HDoubleConstant*> cached_double_constants_;
+ HCurrentMethod* cached_current_method_;
+
friend class SsaBuilder; // For caching constants.
friend class SsaLivenessAnalysis; // For the linear order.
ART_FRIEND_TEST(GraphTest, IfSuccessorSimpleJoinBlock1);
@@ -813,6 +819,7 @@ class HLoopInformationOutwardIterator : public ValueObject {
M(ClinitCheck, Instruction) \
M(Compare, BinaryOperation) \
M(Condition, BinaryOperation) \
+ M(CurrentMethod, Instruction) \
M(Deoptimize, Instruction) \
M(Div, BinaryOperation) \
M(DivZeroCheck, Instruction) \
@@ -1826,6 +1833,19 @@ class HDeoptimize : public HTemplateInstruction<1> {
DISALLOW_COPY_AND_ASSIGN(HDeoptimize);
};
+// Represents the ArtMethod that was passed as the first argument to
+// the method. It is used by instructions that depend on it, like
+// instructions that work with the dex cache.
+class HCurrentMethod : public HExpression<0> {
+ public:
+ HCurrentMethod() : HExpression(Primitive::kPrimNot, SideEffects::None()) {}
+
+ DECLARE_INSTRUCTION(CurrentMethod);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(HCurrentMethod);
+};
+
class HUnaryOperation : public HExpression<1> {
public:
HUnaryOperation(Primitive::Type result_type, HInstruction* input)
@@ -3455,9 +3475,10 @@ class HSuspendCheck : public HTemplateInstruction<0> {
/**
* Instruction to load a Class object.
*/
-class HLoadClass : public HExpression<0> {
+class HLoadClass : public HExpression<1> {
public:
- HLoadClass(uint16_t type_index,
+ HLoadClass(HCurrentMethod* current_method,
+ uint16_t type_index,
const DexFile& dex_file,
bool is_referrers_class,
uint32_t dex_pc)
@@ -3467,7 +3488,9 @@ class HLoadClass : public HExpression<0> {
is_referrers_class_(is_referrers_class),
dex_pc_(dex_pc),
generate_clinit_check_(false),
- loaded_class_rti_(ReferenceTypeInfo::CreateTop(/* is_exact */ false)) {}
+ loaded_class_rti_(ReferenceTypeInfo::CreateTop(/* is_exact */ false)) {
+ SetRawInputAt(0, current_method);
+ }
bool CanBeMoved() const OVERRIDE { return true; }
@@ -3539,12 +3562,14 @@ class HLoadClass : public HExpression<0> {
DISALLOW_COPY_AND_ASSIGN(HLoadClass);
};
-class HLoadString : public HExpression<0> {
+class HLoadString : public HExpression<1> {
public:
- HLoadString(uint32_t string_index, uint32_t dex_pc)
+ HLoadString(HCurrentMethod* current_method, uint32_t string_index, uint32_t dex_pc)
: HExpression(Primitive::kPrimNot, SideEffects::None()),
string_index_(string_index),
- dex_pc_(dex_pc) {}
+ dex_pc_(dex_pc) {
+ SetRawInputAt(0, current_method);
+ }
bool CanBeMoved() const OVERRIDE { return true; }
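
Turning the loads into HExpression<1> makes the current method an ordinary use, visible to generic machinery (use lists, liveness, equality-based optimizations) with no special cases. For example, since every load shares the one cached HCurrentMethod, comparing input 0 by pointer plus the string index is enough to spot duplicate string loads. A hypothetical sketch, not ART's actual GVN code:

  // Both inputs refer to the same cached HCurrentMethod node, so pointer
  // equality on input 0 is meaningful here.
  bool IsSameLoadString(HLoadString* a, HLoadString* b) {
    return a->InputAt(0) == b->InputAt(0)
        && a->GetStringIndex() == b->GetStringIndex();
  }
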
diff --git a/compiler/optimizing/optimizing_compiler.cc b/compiler/optimizing/optimizing_compiler.cc
index fa3c310811..3123843b7f 100644
--- a/compiler/optimizing/optimizing_compiler.cc
+++ b/compiler/optimizing/optimizing_compiler.cc
@@ -401,7 +401,7 @@ CompiledMethod* OptimizingCompiler::CompileOptimized(HGraph* graph,
codegen->CompileOptimized(&allocator);
DefaultSrcMap src_mapping_table;
- if (compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols()) {
+ if (compiler_driver->GetCompilerOptions().GetGenerateDebugInfo()) {
codegen->BuildSourceMap(&src_mapping_table);
}
@@ -438,7 +438,7 @@ CompiledMethod* OptimizingCompiler::CompileBaseline(
std::vector<uint8_t> mapping_table;
codegen->BuildMappingTable(&mapping_table);
DefaultSrcMap src_mapping_table;
- if (compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols()) {
+ if (compiler_driver->GetCompilerOptions().GetGenerateDebugInfo()) {
codegen->BuildSourceMap(&src_mapping_table);
}
std::vector<uint8_t> vmap_table;
@@ -534,7 +534,7 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
return nullptr;
}
codegen->GetAssembler()->cfi().SetEnabled(
- compiler_driver->GetCompilerOptions().GetIncludeCFI());
+ compiler_driver->GetCompilerOptions().GetGenerateDebugInfo());
PassInfoPrinter pass_info_printer(graph,
method_name.c_str(),
diff --git a/compiler/optimizing/parallel_move_resolver.h b/compiler/optimizing/parallel_move_resolver.h
index e89417df7d..9ede91013e 100644
--- a/compiler/optimizing/parallel_move_resolver.h
+++ b/compiler/optimizing/parallel_move_resolver.h
@@ -20,6 +20,7 @@
#include "base/value_object.h"
#include "utils/growable_array.h"
#include "locations.h"
+#include "primitive.h"
namespace art {
diff --git a/compiler/optimizing/register_allocator.cc b/compiler/optimizing/register_allocator.cc
index 925099ade6..d4ff4d8dee 100644
--- a/compiler/optimizing/register_allocator.cc
+++ b/compiler/optimizing/register_allocator.cc
@@ -482,8 +482,9 @@ bool RegisterAllocator::ValidateIntervals(const GrowableArray<LiveInterval*>& in
LiveInterval* current = it.CurrentInterval();
HInstruction* defined_by = current->GetParent()->GetDefinedBy();
if (current->GetParent()->HasSpillSlot()
- // Parameters have their own stack slot.
- && !(defined_by != nullptr && defined_by->IsParameterValue())) {
+ // Parameters and the current method have their own stack slots.
+ && !(defined_by != nullptr && (defined_by->IsParameterValue()
+ || defined_by->IsCurrentMethod()))) {
BitVector* liveness_of_spill_slot = liveness_of_values.Get(number_of_registers
+ current->GetParent()->GetSpillSlot() / kVRegSize
- number_of_out_slots);
@@ -1246,6 +1247,11 @@ void RegisterAllocator::AllocateSpillSlotFor(LiveInterval* interval) {
return;
}
+ if (defined_by->IsCurrentMethod()) {
+ parent->SetSpillSlot(0);
+ return;
+ }
+
if (defined_by->IsConstant()) {
// Constants don't need a spill slot.
return;
@@ -1519,7 +1525,10 @@ void RegisterAllocator::InsertMoveAfter(HInstruction* instruction,
void RegisterAllocator::ConnectSiblings(LiveInterval* interval) {
LiveInterval* current = interval;
- if (current->HasSpillSlot() && current->HasRegister()) {
+ if (current->HasSpillSlot()
+ && current->HasRegister()
+ // Currently, we unconditionally spill the current method in the code generators.
+ && !interval->GetDefinedBy()->IsCurrentMethod()) {
// We spill eagerly, so move must be at definition.
InsertMoveAfter(interval->GetDefinedBy(),
interval->ToLocation(),
@@ -1715,6 +1724,9 @@ void RegisterAllocator::Resolve() {
} else if (current->HasSpillSlot()) {
current->SetSpillSlot(current->GetSpillSlot() + codegen_->GetFrameSize());
}
+ } else if (instruction->IsCurrentMethod()) {
+ // The current method is always at offset 0.
+ DCHECK(!current->HasSpillSlot() || (current->GetSpillSlot() == 0));
} else if (current->HasSpillSlot()) {
// Adjust the stack slot, now that we know the number of them for each type.
// The way this implementation lays out the stack is the following:
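
The three register_allocator.cc hunks above all encode one invariant: the current method's spill slot is the slot the frame entry already wrote, so it is pinned to slot 0, never needs a connecting spill move, and is excluded from the final slot renumbering. Spelled out as a hypothetical check (every accessor below appears in the hunks above):

  // An interval defined by HCurrentMethod either has no spill slot yet or
  // has slot 0, the location the frame entry stored the method register to.
  void CheckCurrentMethodInterval(LiveInterval* interval) {
    HInstruction* defined_by = interval->GetParent()->GetDefinedBy();
    if (defined_by != nullptr && defined_by->IsCurrentMethod()) {
      DCHECK(!interval->GetParent()->HasSpillSlot()
             || interval->GetParent()->GetSpillSlot() == 0);
    }
  }
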
diff --git a/compiler/optimizing/register_allocator.h b/compiler/optimizing/register_allocator.h
index 6d5bfc3f0d..c29fe75921 100644
--- a/compiler/optimizing/register_allocator.h
+++ b/compiler/optimizing/register_allocator.h
@@ -17,6 +17,7 @@
#ifndef ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
#define ART_COMPILER_OPTIMIZING_REGISTER_ALLOCATOR_H_
+#include "arch/instruction_set.h"
#include "base/macros.h"
#include "primitive.h"
#include "utils/growable_array.h"