Refactor OSR-related code to prepare for "true" OSR.
- Make the compiler restore all callee-save registers.
- Make the compiler return any value in a core register: this simplifies
the current stub, and will also avoid having to look at the return
type (and reading the shorty) when returning to an nterp frame.
- Add OsrData and offsets of its members to be used by nterp.
Test: test.py
Bug: 27094810
Change-Id: Ifa4f4877ab8b1f0c6a96feccea30c909942eb2fa
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 8406ef5..a94514c 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1011,6 +1011,20 @@
is_leaf_(true),
requires_current_method_(false),
code_generation_data_() {
+ if (GetGraph()->IsCompilingOsr()) {
+ // Make OSR methods have all registers spilled; this simplifies the logic of
+ // jumping to the compiled code directly.
+ for (size_t i = 0; i < number_of_core_registers_; ++i) {
+ if (IsCoreCalleeSaveRegister(i)) {
+ AddAllocatedRegister(Location::RegisterLocation(i));
+ }
+ }
+ for (size_t i = 0; i < number_of_fpu_registers_; ++i) {
+ if (IsFloatingPointCalleeSaveRegister(i)) {
+ AddAllocatedRegister(Location::FpuRegisterLocation(i));
+ }
+ }
+ }
}
CodeGenerator::~CodeGenerator() {}
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index d3ce2db..64ec987 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -5530,7 +5530,21 @@
locations->SetInAt(0, ARM64ReturnLocation(return_type));
}
-void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction ATTRIBUTE_UNUSED) {
+void InstructionCodeGeneratorARM64::VisitReturn(HReturn* ret) {
+ if (GetGraph()->IsCompilingOsr()) {
+ // To simplify callers of an OSR method, we put the return value in both
+ // floating point and core registers.
+ switch (ret->InputAt(0)->GetType()) {
+ case DataType::Type::kFloat32:
+ __ Fmov(w0, s0);
+ break;
+ case DataType::Type::kFloat64:
+ __ Fmov(x0, d0);
+ break;
+ default:
+ break;
+ }
+ }
codegen_->GenerateFrameExit();
}
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 4932a2c..d4a41f7 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3252,7 +3252,21 @@
locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}
-void InstructionCodeGeneratorARMVIXL::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
+void InstructionCodeGeneratorARMVIXL::VisitReturn(HReturn* ret) {
+ if (GetGraph()->IsCompilingOsr()) {
+ // To simplify callers of an OSR method, we put the return value in both
+ // floating point and core registers.
+ switch (ret->InputAt(0)->GetType()) {
+ case DataType::Type::kFloat32:
+ __ Vmov(r0, s0);
+ break;
+ case DataType::Type::kFloat64:
+ __ Vmov(r0, r1, d0);
+ break;
+ default:
+ break;
+ }
+ }
codegen_->GenerateFrameExit();
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index c3cd25c..f02ab26 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2212,31 +2212,46 @@
}
void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) {
- if (kIsDebugBuild) {
- switch (ret->InputAt(0)->GetType()) {
- case DataType::Type::kReference:
- case DataType::Type::kBool:
- case DataType::Type::kUint8:
- case DataType::Type::kInt8:
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<Register>(), EAX);
- break;
+ switch (ret->InputAt(0)->GetType()) {
+ case DataType::Type::kReference:
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<Register>(), EAX);
+ break;
- case DataType::Type::kInt64:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairLow<Register>(), EAX);
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairHigh<Register>(), EDX);
- break;
+ case DataType::Type::kInt64:
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairLow<Register>(), EAX);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairHigh<Register>(), EDX);
+ break;
- case DataType::Type::kFloat32:
- case DataType::Type::kFloat64:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>(), XMM0);
- break;
+ case DataType::Type::kFloat32:
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>(), XMM0);
+ if (GetGraph()->IsCompilingOsr()) {
+ // To simplify callers of an OSR method, we put the return value in both
+ // floating point and core registers.
+ __ movd(EAX, XMM0);
+ }
+ break;
- default:
- LOG(FATAL) << "Unknown return type " << ret->InputAt(0)->GetType();
- }
+ case DataType::Type::kFloat64:
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>(), XMM0);
+ if (GetGraph()->IsCompilingOsr()) {
+ // To simplify callers of an OSR method, we put the return value in both
+ // floating point and core registers.
+ __ movd(EAX, XMM0);
+ // Use XMM1 as temporary register to not clobber XMM0.
+ __ movaps(XMM1, XMM0);
+ __ psrlq(XMM1, Immediate(32));
+ __ movd(EDX, XMM1);
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unknown return type " << ret->InputAt(0)->GetType();
}
codegen_->GenerateFrameExit();
}
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5d4cfb4..1172776 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2364,28 +2364,41 @@
}
void InstructionCodeGeneratorX86_64::VisitReturn(HReturn* ret) {
- if (kIsDebugBuild) {
- switch (ret->InputAt(0)->GetType()) {
- case DataType::Type::kReference:
- case DataType::Type::kBool:
- case DataType::Type::kUint8:
- case DataType::Type::kInt8:
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- case DataType::Type::kInt64:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<CpuRegister>().AsRegister(), RAX);
- break;
+ switch (ret->InputAt(0)->GetType()) {
+ case DataType::Type::kReference:
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ case DataType::Type::kInt64:
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<CpuRegister>().AsRegister(), RAX);
+ break;
- case DataType::Type::kFloat32:
- case DataType::Type::kFloat64:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>().AsFloatRegister(),
- XMM0);
- break;
-
- default:
- LOG(FATAL) << "Unexpected return type " << ret->InputAt(0)->GetType();
+ case DataType::Type::kFloat32: {
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>().AsFloatRegister(),
+ XMM0);
+ // To simplify callers of an OSR method, we put the return value in both
+ // floating point and core registers.
+ if (GetGraph()->IsCompilingOsr()) {
+ __ movd(CpuRegister(RAX), XmmRegister(XMM0), /* is64bit= */ false);
+ }
+ break;
}
+ case DataType::Type::kFloat64: {
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>().AsFloatRegister(),
+ XMM0);
+ // To simplify callers of an OSR method, we put the return value in both
+ // floating point and core registers.
+ if (GetGraph()->IsCompilingOsr()) {
+ __ movd(CpuRegister(RAX), XmmRegister(XMM0), /* is64bit= */ true);
+ }
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected return type " << ret->InputAt(0)->GetType();
}
codegen_->GenerateFrameExit();
}