author Roland Levillain <rpl@google.com> 2015-04-29 15:38:43 +0000
committer Android Git Automerger <android-git-automerger@android.com> 2015-04-29 15:38:43 +0000
commit 4c176ef5ecd5a28354b9f98e5bafb848b0399a63 (patch)
tree 531fee01eef05df55c6247c657548f9511429050 /compiler/optimizing
parent 40e183005ecf05938f27a9722f419e71bb0e4257 (diff)
parent 0829d9de97b92bcd575f4d5c1f717abf7405d8e2 (diff)
am 0829d9de: Merge "Factor MoveArguments methods in Optimizing's intrinsics handlers."
* commit '0829d9de97b92bcd575f4d5c1f717abf7405d8e2': Factor MoveArguments methods in Optimizing's intrinsics handlers.
Diffstat (limited to 'compiler/optimizing')
-rw-r--r--  compiler/optimizing/code_generator.h      2
-rw-r--r--  compiler/optimizing/intrinsics.cc         1
-rw-r--r--  compiler/optimizing/intrinsics.h         34
-rw-r--r--  compiler/optimizing/intrinsics_arm.cc    25
-rw-r--r--  compiler/optimizing/intrinsics_arm64.cc  25
-rw-r--r--  compiler/optimizing/intrinsics_x86.cc    27
-rw-r--r--  compiler/optimizing/intrinsics_x86_64.cc 27
7 files changed, 49 insertions, 92 deletions
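
In outline, the change replaces four backend-specific copies of MoveArguments (ARM, ARM64, x86, x86-64) with a single IntrinsicVisitor::MoveArguments. The shared body reaches the backend through two seams: CodeGenerator::GetMoveResolver(), which this patch makes public, and an InvokeDexCallingConventionVisitor supplied by a thin per-backend wrapper. Below is a minimal, compilable sketch of that factoring pattern; every type in it is a stub invented for illustration, not ART's real API:

// Minimal, compilable sketch of the factoring pattern (stub types only; these
// are not ART's real classes). One shared MoveArguments body is driven by a
// per-backend calling-convention visitor instead of being copied per backend.
#include <cstdio>

struct Location { int reg; };
enum class Type { kInt, kFloat };

// Backend-agnostic question: where does the next argument of `type` go?
class CallingConventionVisitor {
 public:
  virtual ~CallingConventionVisitor() {}
  virtual Location GetNextLocation(Type type) = 0;
};

// The single shared implementation (stand-in for IntrinsicVisitor::MoveArguments).
void MoveArguments(int num_args, CallingConventionVisitor* visitor) {
  for (int i = 0; i < num_args; ++i) {
    Location loc = visitor->GetNextLocation(Type::kInt);
    // Stand-in for parallel_move.AddMove(...) + EmitNativeCode(...).
    std::printf("arg %d -> r%d\n", i, loc.reg);
  }
}

// Per-backend policy, analogous to InvokeDexCallingConventionVisitorARM.
class ArmLikeVisitor : public CallingConventionVisitor {
 public:
  Location GetNextLocation(Type) override { return Location{next_reg_++}; }
 private:
  int next_reg_ = 0;  // core args go to r0, r1, ... in this toy convention
};

int main() {
  ArmLikeVisitor visitor;
  MoveArguments(3, &visitor);  // shared body, backend-specific placement
  return 0;
}

The design point: the duplicated loops were identical except for which calling-convention visitor they instantiated, so turning that one varying piece into a virtual parameter removes the other four copies wholesale.
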
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index beaff5cc4c..bdbd571133 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -327,6 +327,7 @@ class CodeGenerator {
    return GetFpuSpillSize() + GetCoreSpillSize();
  }
+  virtual ParallelMoveResolver* GetMoveResolver() = 0;
 protected:
  CodeGenerator(HGraph* graph,
@@ -370,7 +371,6 @@ class CodeGenerator {
  virtual Location GetStackLocation(HLoadLocal* load) const = 0;
-  virtual ParallelMoveResolver* GetMoveResolver() = 0;
  virtual HGraphVisitor* GetLocationBuilder() = 0;
  virtual HGraphVisitor* GetInstructionVisitor() = 0;
diff --git a/compiler/optimizing/intrinsics.cc b/compiler/optimizing/intrinsics.cc
index 5d3db5c6f2..43fe3746ad 100644
--- a/compiler/optimizing/intrinsics.cc
+++ b/compiler/optimizing/intrinsics.cc
@@ -374,4 +374,3 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
}
} // namespace art
-
diff --git a/compiler/optimizing/intrinsics.h b/compiler/optimizing/intrinsics.h
index dbb7cbaa98..c243ef3f8b 100644
--- a/compiler/optimizing/intrinsics.h
+++ b/compiler/optimizing/intrinsics.h
@@ -17,8 +17,10 @@
#ifndef ART_COMPILER_OPTIMIZING_INTRINSICS_H_
#define ART_COMPILER_OPTIMIZING_INTRINSICS_H_
+#include "code_generator.h"
#include "nodes.h"
#include "optimization.h"
+#include "parallel_move_resolver.h"
namespace art {
@@ -76,6 +78,38 @@ INTRINSICS_LIST(OPTIMIZING_INTRINSICS)
#undef INTRINSICS_LIST
#undef OPTIMIZING_INTRINSICS
+  static void MoveArguments(HInvoke* invoke,
+                            CodeGenerator* codegen,
+                            InvokeDexCallingConventionVisitor* calling_convention_visitor) {
+    if (kIsDebugBuild && invoke->IsInvokeStaticOrDirect()) {
+      HInvokeStaticOrDirect* invoke_static_or_direct = invoke->AsInvokeStaticOrDirect();
+      // When we do not run baseline, explicit clinit checks triggered by static
+      // invokes must have been pruned by art::PrepareForRegisterAllocation.
+      DCHECK(codegen->IsBaseline() || !invoke_static_or_direct->IsStaticWithExplicitClinitCheck());
+    }
+
+    if (invoke->GetNumberOfArguments() == 0) {
+      // No argument to move.
+      return;
+    }
+
+    LocationSummary* locations = invoke->GetLocations();
+
+    // We're moving potentially two or more locations to locations that could overlap, so we need
+    // a parallel move resolver.
+    HParallelMove parallel_move(codegen->GetGraph()->GetArena());
+
+    for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
+      HInstruction* input = invoke->InputAt(i);
+      Location cc_loc = calling_convention_visitor->GetNextLocation(input->GetType());
+      Location actual_loc = locations->InAt(i);
+
+      parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
+    }
+
+    codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+  }
+
 protected:
  IntrinsicVisitor() {}
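
The comment in the helper above, that argument locations "could overlap", is the reason the moves go through HParallelMove and a resolver instead of being emitted one at a time: a naive sequence can clobber a source register before it is read, e.g. when arg0 must move r1 -> r2 while arg1 must move r2 -> r1. A standalone sketch of the resolution idea follows; ResolveParallelMoves and the integer register model are invented for illustration and are not ART's ParallelMoveResolver:

#include <cstdio>
#include <utility>
#include <vector>

// Emits a sequence of single moves equivalent to performing all of the
// (dst <- src) register moves at once. A move is safe to emit when its
// destination is not still needed as the source of another pending move;
// when no move is safe, the pending moves form a cycle, which is broken
// by copying one source aside into a scratch register.
void ResolveParallelMoves(std::vector<std::pair<int, int>> moves, int scratch) {
  while (!moves.empty()) {
    bool progress = false;
    for (size_t i = 0; i < moves.size(); ++i) {
      int dst = moves[i].first;
      bool dst_still_needed = false;
      for (const auto& m : moves) {
        if (m.second == dst) { dst_still_needed = true; break; }
      }
      if (!dst_still_needed) {
        std::printf("mov r%d <- r%d\n", dst, moves[i].second);
        moves.erase(moves.begin() + i);
        progress = true;
        break;
      }
    }
    if (!progress) {
      // Cycle: save one source in the scratch register and redirect its
      // reader there, freeing that register to be overwritten.
      int s = moves.front().second;
      std::printf("mov r%d <- r%d  // break cycle\n", scratch, s);
      for (auto& m : moves) {
        if (m.second == s) m.second = scratch;
      }
    }
  }
}

int main() {
  // The overlap case: arg0 must move r1 -> r2 while arg1 must move r2 -> r1.
  // Emitting either move first would clobber the other move's source.
  ResolveParallelMoves({{2, 1}, {1, 2}}, /*scratch=*/0);
  return 0;
}

On the swap input this emits mov r0 <- r1, mov r1 <- r2, mov r2 <- r0: the cycle is broken through the scratch register, the same kind of schedule EmitNativeCode derives from the HParallelMove built by the helper.
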
diff --git a/compiler/optimizing/intrinsics_arm.cc b/compiler/optimizing/intrinsics_arm.cc
index 259d554dbe..7f7b450003 100644
--- a/compiler/optimizing/intrinsics_arm.cc
+++ b/compiler/optimizing/intrinsics_arm.cc
@@ -77,28 +77,9 @@ static void MoveFromReturnRegister(Location trg, Primitive::Type type, CodeGener
  }
}
-static void MoveArguments(HInvoke* invoke, ArenaAllocator* arena, CodeGeneratorARM* codegen) {
-  if (invoke->GetNumberOfArguments() == 0) {
-    // No argument to move.
-    return;
-  }
-
-  LocationSummary* locations = invoke->GetLocations();
+static void MoveArguments(HInvoke* invoke, CodeGeneratorARM* codegen) {
  InvokeDexCallingConventionVisitorARM calling_convention_visitor;
-
-  // We're moving potentially two or more locations to locations that could overlap, so we need
-  // a parallel move resolver.
-  HParallelMove parallel_move(arena);
-
-  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
-    HInstruction* input = invoke->InputAt(i);
-    Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
-    Location actual_loc = locations->InAt(i);
-
-    parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
-  }
-
-  codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+  IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}
// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
@@ -117,7 +98,7 @@ class IntrinsicSlowPathARM : public SlowPathCodeARM {
    SaveLiveRegisters(codegen, invoke_->GetLocations());
-    MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
+    MoveArguments(invoke_, codegen);
    if (invoke_->IsInvokeStaticOrDirect()) {
      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc
index 9cfa78219d..ca3de99092 100644
--- a/compiler/optimizing/intrinsics_arm64.cc
+++ b/compiler/optimizing/intrinsics_arm64.cc
@@ -86,28 +86,9 @@ static void MoveFromReturnRegister(Location trg,
  }
}
-static void MoveArguments(HInvoke* invoke, ArenaAllocator* arena, CodeGeneratorARM64* codegen) {
-  if (invoke->GetNumberOfArguments() == 0) {
-    // No argument to move.
-    return;
-  }
-
-  LocationSummary* locations = invoke->GetLocations();
+static void MoveArguments(HInvoke* invoke, CodeGeneratorARM64* codegen) {
  InvokeDexCallingConventionVisitorARM64 calling_convention_visitor;
-
-  // We're moving potentially two or more locations to locations that could overlap, so we need
-  // a parallel move resolver.
-  HParallelMove parallel_move(arena);
-
-  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
-    HInstruction* input = invoke->InputAt(i);
-    Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
-    Location actual_loc = locations->InAt(i);
-
-    parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
-  }
-
-  codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+  IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}
// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
@@ -126,7 +107,7 @@ class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 {
    SaveLiveRegisters(codegen, invoke_->GetLocations());
-    MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
+    MoveArguments(invoke_, codegen);
    if (invoke_->IsInvokeStaticOrDirect()) {
      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), kArtMethodRegister);
diff --git a/compiler/optimizing/intrinsics_x86.cc b/compiler/optimizing/intrinsics_x86.cc
index 62cf3eb0e5..1eef1eff0b 100644
--- a/compiler/optimizing/intrinsics_x86.cc
+++ b/compiler/optimizing/intrinsics_x86.cc
@@ -111,28 +111,9 @@ static void MoveFromReturnRegister(Location target,
  }
}
-static void MoveArguments(HInvoke* invoke, ArenaAllocator* arena, CodeGeneratorX86* codegen) {
-  if (invoke->GetNumberOfArguments() == 0) {
-    // No argument to move.
-    return;
-  }
-
-  LocationSummary* locations = invoke->GetLocations();
+static void MoveArguments(HInvoke* invoke, CodeGeneratorX86* codegen) {
  InvokeDexCallingConventionVisitorX86 calling_convention_visitor;
-
-  // We're moving potentially two or more locations to locations that could overlap, so we need
-  // a parallel move resolver.
-  HParallelMove parallel_move(arena);
-
-  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
-    HInstruction* input = invoke->InputAt(i);
-    Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
-    Location actual_loc = locations->InAt(i);
-
-    parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
-  }
-
-  codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+  IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}
// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
@@ -155,7 +136,7 @@ class IntrinsicSlowPathX86 : public SlowPathCodeX86 {
    SaveLiveRegisters(codegen, invoke_->GetLocations());
-    MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
+    MoveArguments(invoke_, codegen);
    if (invoke_->IsInvokeStaticOrDirect()) {
      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), EAX);
@@ -749,7 +730,7 @@ void IntrinsicCodeGeneratorX86::VisitMathSqrt(HInvoke* invoke) {
}
static void InvokeOutOfLineIntrinsic(CodeGeneratorX86* codegen, HInvoke* invoke) {
-  MoveArguments(invoke, codegen->GetGraph()->GetArena(), codegen);
+  MoveArguments(invoke, codegen);
  DCHECK(invoke->IsInvokeStaticOrDirect());
  codegen->GenerateStaticOrDirectCall(invoke->AsInvokeStaticOrDirect(), EAX);
diff --git a/compiler/optimizing/intrinsics_x86_64.cc b/compiler/optimizing/intrinsics_x86_64.cc
index 7e24dca99a..1fc5432a89 100644
--- a/compiler/optimizing/intrinsics_x86_64.cc
+++ b/compiler/optimizing/intrinsics_x86_64.cc
@@ -103,28 +103,9 @@ static void MoveFromReturnRegister(Location trg,
  }
}
-static void MoveArguments(HInvoke* invoke, ArenaAllocator* arena, CodeGeneratorX86_64* codegen) {
-  if (invoke->GetNumberOfArguments() == 0) {
-    // No argument to move.
-    return;
-  }
-
-  LocationSummary* locations = invoke->GetLocations();
+static void MoveArguments(HInvoke* invoke, CodeGeneratorX86_64* codegen) {
  InvokeDexCallingConventionVisitorX86_64 calling_convention_visitor;
-
-  // We're moving potentially two or more locations to locations that could overlap, so we need
-  // a parallel move resolver.
-  HParallelMove parallel_move(arena);
-
-  for (size_t i = 0; i < invoke->GetNumberOfArguments(); i++) {
-    HInstruction* input = invoke->InputAt(i);
-    Location cc_loc = calling_convention_visitor.GetNextLocation(input->GetType());
-    Location actual_loc = locations->InAt(i);
-
-    parallel_move.AddMove(actual_loc, cc_loc, input->GetType(), nullptr);
-  }
-
-  codegen->GetMoveResolver()->EmitNativeCode(&parallel_move);
+  IntrinsicVisitor::MoveArguments(invoke, codegen, &calling_convention_visitor);
}
// Slow-path for fallback (calling the managed code to handle the intrinsic) in an intrinsified
@@ -143,7 +124,7 @@ class IntrinsicSlowPathX86_64 : public SlowPathCodeX86_64 {
    SaveLiveRegisters(codegen, invoke_->GetLocations());
-    MoveArguments(invoke_, codegen->GetGraph()->GetArena(), codegen);
+    MoveArguments(invoke_, codegen);
    if (invoke_->IsInvokeStaticOrDirect()) {
      codegen->GenerateStaticOrDirectCall(invoke_->AsInvokeStaticOrDirect(), CpuRegister(RDI));
@@ -623,7 +604,7 @@ void IntrinsicCodeGeneratorX86_64::VisitMathSqrt(HInvoke* invoke) {
}
static void InvokeOutOfLineIntrinsic(CodeGeneratorX86_64* codegen, HInvoke* invoke) {
-  MoveArguments(invoke, codegen->GetGraph()->GetArena(), codegen);
+  MoveArguments(invoke, codegen);
  DCHECK(invoke->IsInvokeStaticOrDirect());
  codegen->GenerateStaticOrDirectCall(invoke->AsInvokeStaticOrDirect(), CpuRegister(RDI));