Factor MoveArguments methods in Optimizing's intrinsics handlers.
Also add a precondition, similar to the one already present in
code generators, checking that in non-baseline compilations,
explicit clinit checks triggered by static invokes have been
eliminated by art::PrepareForRegisterAllocation.
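With this change, each backend's MoveArguments reduces to a thin
wrapper that instantiates its calling convention visitor and
delegates to the shared IntrinsicVisitor::MoveArguments helper. As
a sketch, a backend for some architecture Foo (hypothetical names)
would follow the same pattern as the four backends updated here:

  // CodeGeneratorFoo and InvokeDexCallingConventionVisitorFoo are
  // placeholder names standing in for a backend's actual types.
  static void MoveArguments(HInvoke* invoke, CodeGeneratorFoo* codegen) {
    InvokeDexCallingConventionVisitorFoo calling_convention_visitor;
    IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
  }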
Change-Id: I26f4dcb5d02824d7556f90b4b0c85b08b737fa53
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index beaff5c..bdbd571 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -327,6 +327,7 @@
return GetFpuSpillSize() + GetCoreSpillSize();
}
+ virtual ParallelMoveResolver* GetMoveResolver() = 0;
protected:
CodeGenerator(HGraph* graph,
@@ -370,7 +371,6 @@
virtual Location GetStackLocation(HLoadLocal* load) const = 0;
- virtual ParallelMoveResolver* GetMoveResolver() = 0;
virtual HGraphVisitor* GetLocationBuilder() = 0;
virtual HGraphVisitor* GetInstructionVisitor() = 0;
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 5d3db5c..43fe374 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -374,4 +374,3 @@
}
} // namespace art
-
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index dbb7cba..c243ef3 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -17,8 +17,10 @@
#ifndef ART_COMPILER_OPTIMIZING_INTRINSICS_H_
#define ART_COMPILER_OPTIMIZING_INTRINSICS_H_
+#include "code_generator.h"
#include "nodes.h"
#include "optimization.h"
+#include "parallel_move_resolver.h"
namespace art {
@@ -76,6 +78,38 @@
#undef INTRINSICS_LIST
#undef OPTIMIZING_INTRINSICS
+ static void MoveArguments(HInvoke* invoke,
+ CodeGenerator* codegen,
+ InvokeDexCallingConventionVisitor* calling_convention_visitor) {
+ if (kIsDebugBuild && invoke->IsInvokeStaticOrDirect()) {
+ HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
+ // When we do not run baseline, explicit clinit checks triggered by static
+ // invokes must have been pruned by art::PrepareForRegisterAllocation.
+ DCHECK(codegen->IsBaseline() || !invoke_static_or_direct->IsStaticWithExplicitClinitCheck());
+ }
+
+ if (invoke->GetNumberOfArguments() == 0) {
+ // No argument to move.
+ return;
+ }
+
+ LocationSummary* locations = invoke->GetLocations();
+
+ // We're moving potentially two or more locations to locations that could overlap, so we need
+ // a parallel move resolver.
+ HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+
+ for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
+ HInstruction* input = invoke->InputAt(i);
+ Location cc_loc = calling_convention_visitor->GetNextLocation(input->GetType());
+ Location actual_loc = locations->InAt(i);
+
+ parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
+ }
+
+ codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ }
+
protected:
IntrinsicVisitor() {}
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 259d554..7f7b450 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -77,28 +77,9 @@
}
}
-static void MoveArguments(HInvoke* invoke, ArenaAllocator* arena, CodeGeneratorARM* codegen) {
- if (invoke->GetNumberOfArguments() == 0) {
- // No argument to move.
- return;
- }
-
- LocationSummary* locations = invoke->GetLocations();
+static void MoveArguments(HInvoke* invoke, CodeGeneratorARM* codegen) {
InvokeDexCallingConventionVisitorARM calling_convention_visitor;
-
- // We're moving potentially two or more locations to locations that could overlap, so we need
- // a parallel move resolver.
- HParallelMove parallel_move(arena);
-
- for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
- HInstruction* input = invoke->InputAt(i);
- Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
- Location actual_loc = locations->InAt(i);
-
- parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
- }
-
- codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}
// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
@@ -117,7 +98,7 @@
SaveLiveRegisters(codegen, invoke_->GetLocations());
- MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
+ MoveArguments(invoke_, codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 9cfa782..ca3de99 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -86,28 +86,9 @@
}
}
-static void MoveArguments(HInvoke* invoke, ArenaAllocator* arena, CodeGeneratorARM64* codegen) {
- if (invoke->GetNumberOfArguments() == 0) {
- // No argument to move.
- return;
- }
-
- LocationSummary* locations = invoke->GetLocations();
+static void MoveArguments(HInvoke* invoke, CodeGeneratorARM64* codegen) {
InvokeDexCallingConventionVisitorARM64 calling_convention_visitor;
-
- // We're moving potentially two or more locations to locations that could overlap, so we need
- // a parallel move resolver.
- HParallelMove parallel_move(arena);
-
- for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
- HInstruction* input = invoke->InputAt(i);
- Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
- Location actual_loc = locations->InAt(i);
-
- parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
- }
-
- codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}
// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
@@ -126,7 +107,7 @@
SaveLiveRegisters(codegen, invoke_->GetLocations());
- MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
+ MoveArguments(invoke_, codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 62cf3eb..1eef1ef 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -111,28 +111,9 @@
}
}
-static void MoveArguments(HInvoke* invoke, ArenaAllocator* arena, CodeGeneratorX86* codegen) {
- if (invoke->GetNumberOfArguments() == 0) {
- // No argument to move.
- return;
- }
-
- LocationSummary* locations = invoke->GetLocations();
+static void MoveArguments(HInvoke* invoke, CodeGeneratorX86* codegen) {
InvokeDexCallingConventionVisitorX86 calling_convention_visitor;
-
- // We're moving potentially two or more locations to locations that could overlap, so we need
- // a parallel move resolver.
- HParallelMove parallel_move(arena);
-
- for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
- HInstruction* input = invoke->InputAt(i);
- Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
- Location actual_loc = locations->InAt(i);
-
- parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
- }
-
- codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}
// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
@@ -155,7 +136,7 @@
SaveLiveRegisters(codegen, invoke_->GetLocations());
- MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
+ MoveArguments(invoke_, codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), EAX);
@@ -749,7 +730,7 @@
}
static void InvokeOutOfLineIntrinsic(CodeGeneratorX86* codegen, HInvoke* invoke) {
- MoveArguments(invoke, codegen->GetGraph()->GetArena(), codegen);
+ MoveArguments(invoke, codegen);
DCHECK(invoke->IsInvokeStaticOrDirect());
codegen->GenerateStaticOrDirectCall(invoke->AsInvokeStaticOrDirect(), EAX);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 7e24dca..1fc5432 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -103,28 +103,9 @@
}
}
-static void MoveArguments(HInvoke* invoke, ArenaAllocator* arena, CodeGeneratorX86_64* codegen) {
- if (invoke->GetNumberOfArguments() == 0) {
- // No argument to move.
- return;
- }
-
- LocationSummary* locations = invoke->GetLocations();
+static void MoveArguments(HInvoke* invoke, CodeGeneratorX86_64* codegen) {
InvokeDexCallingConventionVisitorX86_64 calling_convention_visitor;
-
- // We're moving potentially two or more locations to locations that could overlap, so we need
- // a parallel move resolver.
- HParallelMove parallel_move(arena);
-
- for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
- HInstruction* input = invoke->InputAt(i);
- Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
- Location actual_loc = locations->InAt(i);
-
- parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
- }
-
- codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+ IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}
// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
@@ -143,7 +124,7 @@
SaveLiveRegisters(codegen, invoke_->GetLocations());
- MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
+ MoveArguments(invoke_, codegen);
if (invoke_->IsInvokeStaticOrDirect()) {
codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), CpuRegister(RDI));
@@ -623,7 +604,7 @@
}
static void InvokeOutOfLineIntrinsic(CodeGeneratorX86_64* codegen, HInvoke* invoke) {
- MoveArguments(invoke, codegen->GetGraph()->GetArena(), codegen);
+ MoveArguments(invoke, codegen);
DCHECK(invoke->IsInvokeStaticOrDirect());
codegen->GenerateStaticOrDirectCall(invoke->AsInvokeStaticOrDirect(), CpuRegister(RDI));