Refactor OSR-related code to prepare for "true" OSR.
- Make the compiler restore all callee-save registers.
- Make the compiler return any value in a core register: this simplifies
  the current stub, and will also avoid having to look at the return
  type (and read the shorty) when returning to an nterp frame.
- Add OsrData and offsets of its members to be used by nterp.
Test: test.py
Bug: 27094810
Change-Id: Ifa4f4877ab8b1f0c6a96feccea30c909942eb2fa
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index 8406ef5..a94514c 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -1011,6 +1011,20 @@
is_leaf_(true),
requires_current_method_(false),
code_generation_data_() {
+ if (GetGraph()->IsCompilingOsr()) {
+ // Make OSR methods spill all callee-save registers: this simplifies the
+ // logic of jumping to the compiled code directly.
+ for (size_t i = 0; i < number_of_core_registers_; ++i) {
+ if (IsCoreCalleeSaveRegister(i)) {
+ AddAllocatedRegister(Location::RegisterLocation(i));
+ }
+ }
+ for (size_t i = 0; i < number_of_fpu_registers_; ++i) {
+ if (IsFloatingPointCalleeSaveRegister(i)) {
+ AddAllocatedRegister(Location::FpuRegisterLocation(i));
+ }
+ }
+ }
}
CodeGenerator::~CodeGenerator() {}
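
The constructor hook above leans on an existing mechanism: the frame builder spills exactly those callee-save registers that the allocator marked as used. A standalone sketch of that interaction (register sets and names are illustrative, not ART's):

    #include <bitset>
    #include <cstddef>

    constexpr size_t kNumCoreRegs = 32;

    // Illustrative model: a method's prologue saves the intersection of
    // the ISA's callee-save set and the registers actually allocated.
    // Pre-marking every callee-save register as allocated (what the OSR
    // branch above does) forces all of them into the frame, so an OSR
    // entry can clobber any register and still see it restored on exit.
    std::bitset<kNumCoreRegs> ComputeCoreSpills(
        std::bitset<kNumCoreRegs> callee_saves,
        std::bitset<kNumCoreRegs> allocated,
        bool compiling_osr) {
      if (compiling_osr) {
        allocated |= callee_saves;
      }
      return callee_saves & allocated;
    }
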
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index d3ce2db..64ec987 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -5530,7 +5530,21 @@
locations->SetInAt(0, ARM64ReturnLocation(return_type));
}
-void InstructionCodeGeneratorARM64::VisitReturn(HReturn* instruction ATTRIBUTE_UNUSED) {
+void InstructionCodeGeneratorARM64::VisitReturn(HReturn* ret) {
+ if (GetGraph()->IsCompilingOsr()) {
+ // To simplify callers of an OSR method, we put the return value in both
+ // floating point and core registers.
+ switch (ret->InputAt(0)->GetType()) {
+ case DataType::Type::kFloat32:
+ __ Fmov(w0, s0);
+ break;
+ case DataType::Type::kFloat64:
+ __ Fmov(x0, d0);
+ break;
+ default:
+ break;
+ }
+ }
codegen_->GenerateFrameExit();
}
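
The Fmov forms used here move raw bits between the FP and core register files; no numeric conversion happens. In C++ terms (a sketch, not ART code):

    #include <cstdint>
    #include <cstring>

    // Equivalent of `fmov w0, s0`: reinterpret the float's bit pattern
    // as a 32-bit integer.
    uint32_t FloatBitsToCore(float s0) {
      uint32_t w0;
      std::memcpy(&w0, &s0, sizeof(w0));
      return w0;
    }

    // Equivalent of `fmov x0, d0` for doubles.
    uint64_t DoubleBitsToCore(double d0) {
      uint64_t x0;
      std::memcpy(&x0, &d0, sizeof(x0));
      return x0;
    }
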
diff --git a/compiler/optimizing/code_generator_arm_vixl.cc b/compiler/optimizing/code_generator_arm_vixl.cc
index 4932a2c..d4a41f7 100644
--- a/compiler/optimizing/code_generator_arm_vixl.cc
+++ b/compiler/optimizing/code_generator_arm_vixl.cc
@@ -3252,7 +3252,21 @@
locations->SetInAt(0, parameter_visitor_.GetReturnLocation(ret->InputAt(0)->GetType()));
}
-void InstructionCodeGeneratorARMVIXL::VisitReturn(HReturn* ret ATTRIBUTE_UNUSED) {
+void InstructionCodeGeneratorARMVIXL::VisitReturn(HReturn* ret) {
+ if (GetGraph()->IsCompilingOsr()) {
+ // To simplify callers of an OSR method, we put the return value in both
+ // floating point and core registers.
+ switch (ret->InputAt(0)->GetType()) {
+ case DataType::Type::kFloat32:
+ __ Vmov(r0, s0);
+ break;
+ case DataType::Type::kFloat64:
+ __ Vmov(r0, r1, d0);
+ break;
+ default:
+ break;
+ }
+ }
codegen_->GenerateFrameExit();
}
diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index c3cd25c..f02ab26 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -2212,31 +2212,46 @@
}
void InstructionCodeGeneratorX86::VisitReturn(HReturn* ret) {
- if (kIsDebugBuild) {
- switch (ret->InputAt(0)->GetType()) {
- case DataType::Type::kReference:
- case DataType::Type::kBool:
- case DataType::Type::kUint8:
- case DataType::Type::kInt8:
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<Register>(), EAX);
- break;
+ switch (ret->InputAt(0)->GetType()) {
+ case DataType::Type::kReference:
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<Register>(), EAX);
+ break;
- case DataType::Type::kInt64:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairLow<Register>(), EAX);
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairHigh<Register>(), EDX);
- break;
+ case DataType::Type::kInt64:
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairLow<Register>(), EAX);
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegisterPairHigh<Register>(), EDX);
+ break;
- case DataType::Type::kFloat32:
- case DataType::Type::kFloat64:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>(), XMM0);
- break;
+ case DataType::Type::kFloat32:
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>(), XMM0);
+ if (GetGraph()->IsCompilingOsr()) {
+ // To simplify callers of an OSR method, we put the return value in both
+ // floating point and core registers.
+ __ movd(EAX, XMM0);
+ }
+ break;
- default:
- LOG(FATAL) << "Unknown return type " << ret->InputAt(0)->GetType();
- }
+ case DataType::Type::kFloat64:
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>(), XMM0);
+ if (GetGraph()->IsCompilingOsr()) {
+ // To simplify callers of an OSR method, we put the return value in both
+ // floating point and core registers.
+ __ movd(EAX, XMM0);
+ // Use XMM1 as a temporary register to avoid clobbering XMM0.
+ __ movaps(XMM1, XMM0);
+ __ psrlq(XMM1, Immediate(32));
+ __ movd(EDX, XMM1);
+ }
+ break;
+
+ default:
+ LOG(FATAL) << "Unknown return type " << ret->InputAt(0)->GetType();
}
codegen_->GenerateFrameExit();
}
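
Since ia32 has no 64-bit core register, the double case mirrors the value into the EAX/EDX pair, the same pair used for `long` returns (the ARM `vmov r0, r1, d0` earlier is the same idea). The movd/psrlq/movd sequence computes, in effect (sketch only):

    #include <cstdint>
    #include <cstring>

    // What the sequence above computes for a double in XMM0: low 32 bits
    // of the bit pattern go to EAX, high 32 bits to EDX.
    void MirrorDoubleToCorePair(double xmm0, uint32_t* eax, uint32_t* edx) {
      uint64_t bits;
      std::memcpy(&bits, &xmm0, sizeof(bits));
      *eax = static_cast<uint32_t>(bits);        // movd: low 32 bits -> EAX
      *edx = static_cast<uint32_t>(bits >> 32);  // psrlq $32 + movd: high -> EDX
    }
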
diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 5d4cfb4..1172776 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -2364,28 +2364,41 @@
}
void InstructionCodeGeneratorX86_64::VisitReturn(HReturn* ret) {
- if (kIsDebugBuild) {
- switch (ret->InputAt(0)->GetType()) {
- case DataType::Type::kReference:
- case DataType::Type::kBool:
- case DataType::Type::kUint8:
- case DataType::Type::kInt8:
- case DataType::Type::kUint16:
- case DataType::Type::kInt16:
- case DataType::Type::kInt32:
- case DataType::Type::kInt64:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<CpuRegister>().AsRegister(), RAX);
- break;
+ switch (ret->InputAt(0)->GetType()) {
+ case DataType::Type::kReference:
+ case DataType::Type::kBool:
+ case DataType::Type::kUint8:
+ case DataType::Type::kInt8:
+ case DataType::Type::kUint16:
+ case DataType::Type::kInt16:
+ case DataType::Type::kInt32:
+ case DataType::Type::kInt64:
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsRegister<CpuRegister>().AsRegister(), RAX);
+ break;
- case DataType::Type::kFloat32:
- case DataType::Type::kFloat64:
- DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>().AsFloatRegister(),
- XMM0);
- break;
-
- default:
- LOG(FATAL) << "Unexpected return type " << ret->InputAt(0)->GetType();
+ case DataType::Type::kFloat32: {
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>().AsFloatRegister(),
+ XMM0);
+ // To simplify callers of an OSR method, we put the return value in both
+ // floating point and core registers.
+ if (GetGraph()->IsCompilingOsr()) {
+ __ movd(CpuRegister(RAX), XmmRegister(XMM0), /* is64bit= */ false);
+ }
+ break;
}
+ case DataType::Type::kFloat64: {
+ DCHECK_EQ(ret->GetLocations()->InAt(0).AsFpuRegister<XmmRegister>().AsFloatRegister(),
+ XMM0);
+ // To simplify callers of an OSR method, we put the return value in both
+ // floating point and core registers.
+ if (GetGraph()->IsCompilingOsr()) {
+ __ movd(CpuRegister(RAX), XmmRegister(XMM0), /* is64bit= */ true);
+ }
+ break;
+ }
+
+ default:
+ LOG(FATAL) << "Unexpected return type " << ret->InputAt(0)->GetType();
}
codegen_->GenerateFrameExit();
}
diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S
index 8b1fc9e..74c1fe7 100644
--- a/runtime/arch/arm/quick_entrypoints_arm.S
+++ b/runtime/arch/arm/quick_entrypoints_arm.S
@@ -632,17 +632,7 @@
ldr r10, [sp, #8] @ Restore JValue* result
ldr sp, [sp, #4] @ Restore saved stack pointer
.cfi_def_cfa sp, SAVE_SIZE @ CFA = sp + SAVE_SIZE
- ldr r4, [sp, #SAVE_SIZE] @ load shorty
- ldrb r4, [r4, #0] @ load return type
- cmp r4, #68 @ Test if result type char == 'D'.
- beq .Losr_fp_result
- cmp r4, #70 @ Test if result type char == 'F'.
- beq .Losr_fp_result
strd r0, [r10] @ Store r0/r1 into result pointer
- b .Losr_exit
-.Losr_fp_result:
- vstr d0, [r10] @ Store s0-s1/d0 into result pointer
-.Losr_exit:
vpop {s16-s31}
.cfi_adjust_cfa_offset -64
pop {r4, r5, r6, r7, r8, r9, r10, r11, pc}
diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S
index e0094e6..fd1a44e 100644
--- a/runtime/arch/arm64/quick_entrypoints_arm64.S
+++ b/runtime/arch/arm64/quick_entrypoints_arm64.S
@@ -1031,29 +1031,8 @@
RESTORE_TWO_REGS xFP, xLR, 96
RESTORE_TWO_REGS_DECREASE_FRAME x3, x4, SAVE_SIZE
- // Store result (w0/x0/s0/d0) appropriately, depending on resultType.
- ldrb w10, [x4]
-
- // Check the return type and store the correct register into the jvalue in memory.
-
- // Don't set anything for a void type.
- cmp w10, #'V'
- beq .Losr_exit
- // Is it a double?
- cmp w10, #'D'
- beq .Losr_return_double
- // Is it a float?
- cmp w10, #'F'
- beq .Losr_return_float
- // Just store x0. Doesn't matter if it is 64 or 32 bits.
+ // The compiler put the result in x0. It does not matter if it is 64 or 32 bits.
str x0, [x3]
-.Losr_exit:
- ret
-.Losr_return_double:
- str d0, [x3]
- ret
-.Losr_return_float:
- str s0, [x3]
ret
.Losr_entry:
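
The single `str x0, [x3]` is safe even for 32-bit results because ARM64 is little-endian and JValue is a union: the low 32 bits of x0 land at offset 0, exactly where a 32-bit member reads them. A sketch of that assumption (`JValueModel` is a simplified stand-in for art::JValue):

    #include <cstdint>
    #include <cstring>

    union JValueModel {  // Simplified stand-in for art::JValue.
      int32_t i;
      int64_t j;
    };

    // Model of `str x0, [x3]`: a full 64-bit store. On a little-endian
    // target, a 32-bit result is still read back correctly through `i`.
    void StoreResult(uint64_t x0, JValueModel* out) {
      std::memcpy(out, &x0, sizeof(x0));
    }
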
diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S
index 794ee89..f212f34 100644
--- a/runtime/arch/x86/quick_entrypoints_x86.S
+++ b/runtime/arch/x86/quick_entrypoints_x86.S
@@ -2425,19 +2425,8 @@
POP ebx
POP ebp
mov 16(%esp), %ecx // Get JValue result
- mov %eax, (%ecx) // Store the result assuming it is a long, int or Object*
- mov %edx, 4(%ecx) // Store the other half of the result
- mov 20(%esp), %edx // Get the shorty
- cmpb LITERAL(68), (%edx) // Test if result type char == 'D'
- je .Losr_return_double_quick
- cmpb LITERAL(70), (%edx) // Test if result type char == 'F'
- je .Losr_return_float_quick
- ret
-.Losr_return_double_quick:
- movsd %xmm0, (%ecx) // Store the floating point result
- ret
-.Losr_return_float_quick:
- movss %xmm0, (%ecx) // Store the floating point result
+ mov %eax, (%ecx) // Store the result.
+ mov %edx, 4(%ecx) // Store the other half of the result.
ret
.Losr_entry:
CFI_RESTORE_STATE_AND_DEF_CFA(ebp, SAVE_SIZE) // CFA = ebp + SAVE_SIZE
diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
index f9b6d2e..d6d68de 100644
--- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S
+++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S
@@ -2253,17 +2253,7 @@
POP r8
POP rcx
POP rbp
- cmpb LITERAL(68), (%r8) // Test if result type char == 'D'.
- je .Losr_return_double_quick
- cmpb LITERAL(70), (%r8) // Test if result type char == 'F'.
- je .Losr_return_float_quick
- movq %rax, (%rcx) // Store the result assuming its a long, int or Object*
- ret
-.Losr_return_double_quick:
- movsd %xmm0, (%rcx) // Store the double floating point result.
- ret
-.Losr_return_float_quick:
- movss %xmm0, (%rcx) // Store the floating point result.
+ movq %rax, (%rcx) // Store the result.
ret
.Losr_entry:
CFI_RESTORE_STATE_AND_DEF_CFA(rsp, 80)
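
All four stubs simplify the same way: the shorty-driven dispatch disappears because compiled OSR code now mirrors FP results into the core return register(s). Schematically (illustrative C++, not the stubs themselves):

    #include <cstdint>

    // Before: pick the source register based on the shorty's return type.
    int64_t SelectResultOld(char shorty_return_type,
                            int64_t core_reg,
                            uint64_t fp_reg_bits) {
      switch (shorty_return_type) {
        case 'D':
        case 'F':
          return static_cast<int64_t>(fp_reg_bits);
        default:
          return core_reg;
      }
    }

    // After: unconditionally read the core register.
    int64_t SelectResultNew(int64_t core_reg) {
      return core_reg;
    }
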
diff --git a/runtime/jit/jit.cc b/runtime/jit/jit.cc
index 4b22d2a..6e89973 100644
--- a/runtime/jit/jit.cc
+++ b/runtime/jit/jit.cc
@@ -459,36 +459,15 @@
const char* shorty,
Thread* self);
-bool Jit::MaybeDoOnStackReplacement(Thread* thread,
- ArtMethod* method,
- uint32_t dex_pc,
- int32_t dex_pc_offset,
- JValue* result) {
+OsrData* Jit::PrepareForOsr(ArtMethod* method, uint32_t dex_pc, uint32_t* vregs) {
if (!kEnableOnStackReplacement) {
- return false;
+ return nullptr;
}
- Jit* jit = Runtime::Current()->GetJit();
- if (jit == nullptr) {
- return false;
- }
-
- if (UNLIKELY(__builtin_frame_address(0) < thread->GetStackEnd())) {
- // Don't attempt to do an OSR if we are close to the stack limit. Since
- // the interpreter frames are still on stack, OSR has the potential
- // to stack overflow even for a simple loop.
- // b/27094810.
- return false;
- }
-
- // Get the actual Java method if this method is from a proxy class. The compiler
- // and the JIT code cache do not expect methods from proxy classes.
- method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
-
// Cheap check if the method has been compiled already. That's an indicator that we should
// osr into it.
- if (!jit->GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
- return false;
+ if (!GetCodeCache()->ContainsPc(method->GetEntryPointFromQuickCompiledCode())) {
+ return nullptr;
}
// Fetch some data before looking up for an OSR method. We don't want thread
@@ -496,36 +475,25 @@
// method while we are being suspended.
CodeItemDataAccessor accessor(method->DexInstructionData());
const size_t number_of_vregs = accessor.RegistersSize();
- const char* shorty = method->GetShorty();
std::string method_name(VLOG_IS_ON(jit) ? method->PrettyMethod() : "");
- void** memory = nullptr;
- size_t frame_size = 0;
- ShadowFrame* shadow_frame = nullptr;
- const uint8_t* native_pc = nullptr;
+ OsrData* osr_data = nullptr;
{
ScopedAssertNoThreadSuspension sts("Holding OSR method");
- const OatQuickMethodHeader* osr_method = jit->GetCodeCache()->LookupOsrMethodHeader(method);
+ const OatQuickMethodHeader* osr_method = GetCodeCache()->LookupOsrMethodHeader(method);
if (osr_method == nullptr) {
// No osr method yet, just return to the interpreter.
- return false;
+ return nullptr;
}
CodeInfo code_info(osr_method);
// Find stack map starting at the target dex_pc.
- StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc + dex_pc_offset);
+ StackMap stack_map = code_info.GetOsrStackMapForDexPc(dex_pc);
if (!stack_map.IsValid()) {
// There is no OSR stack map for this dex pc offset. Just return to the interpreter in the
// hope that the next branch has one.
- return false;
- }
-
- // Before allowing the jump, make sure no code is actively inspecting the method to avoid
- // jumping from interpreter to OSR while e.g. single stepping. Note that we could selectively
- // disable OSR when single stepping, but that's currently hard to know at this point.
- if (Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method)) {
- return false;
+ return nullptr;
}
// We found a stack map, now fill the frame with dex register values from the interpreter's
@@ -533,20 +501,22 @@
DexRegisterMap vreg_map = code_info.GetDexRegisterMapOf(stack_map);
DCHECK_EQ(vreg_map.size(), number_of_vregs);
- frame_size = osr_method->GetFrameSizeInBytes();
+ size_t frame_size = osr_method->GetFrameSizeInBytes();
// Allocate memory to put shadow frame values. The osr stub will copy that memory to
// stack.
// Note that we could pass the shadow frame to the stub, and let it copy the values there,
// but that is engineering complexity not worth the effort for something like OSR.
- memory = reinterpret_cast<void**>(malloc(frame_size));
- CHECK(memory != nullptr);
- memset(memory, 0, frame_size);
+ osr_data = reinterpret_cast<OsrData*>(malloc(sizeof(OsrData) + frame_size));
+ if (osr_data == nullptr) {
+ return nullptr;
+ }
+ memset(osr_data, 0, sizeof(OsrData) + frame_size);
+ osr_data->frame_size = frame_size;
// Art ABI: ArtMethod is at the bottom of the stack.
- memory[0] = method;
+ osr_data->memory[0] = method;
- shadow_frame = thread->PopShadowFrame();
if (vreg_map.empty()) {
// If we don't have a dex register map, then there are no live dex registers at
// this dex pc.
@@ -565,30 +535,71 @@
DCHECK_EQ(location, DexRegisterLocation::Kind::kInStack);
- int32_t vreg_value = shadow_frame->GetVReg(vreg);
+ int32_t vreg_value = vregs[vreg];
int32_t slot_offset = vreg_map[vreg].GetStackOffsetInBytes();
DCHECK_LT(slot_offset, static_cast<int32_t>(frame_size));
DCHECK_GT(slot_offset, 0);
- (reinterpret_cast<int32_t*>(memory))[slot_offset / sizeof(int32_t)] = vreg_value;
+ (reinterpret_cast<int32_t*>(osr_data->memory))[slot_offset / sizeof(int32_t)] = vreg_value;
}
}
- native_pc = stack_map.GetNativePcOffset(kRuntimeISA) +
+ osr_data->native_pc = stack_map.GetNativePcOffset(kRuntimeISA) +
osr_method->GetEntryPoint();
VLOG(jit) << "Jumping to "
<< method_name
<< "@"
- << std::hex << reinterpret_cast<uintptr_t>(native_pc);
+ << std::hex << reinterpret_cast<uintptr_t>(osr_data->native_pc);
+ }
+ return osr_data;
+}
+
+bool Jit::MaybeDoOnStackReplacement(Thread* thread,
+ ArtMethod* method,
+ uint32_t dex_pc,
+ int32_t dex_pc_offset,
+ JValue* result) {
+ Jit* jit = Runtime::Current()->GetJit();
+ if (jit == nullptr) {
+ return false;
+ }
+
+ if (UNLIKELY(__builtin_frame_address(0) < thread->GetStackEnd())) {
+ // Don't attempt to do an OSR if we are close to the stack limit. Since
+ // the interpreter frames are still on stack, OSR has the potential
+ // to stack overflow even for a simple loop.
+ // b/27094810.
+ return false;
+ }
+
+ // Get the actual Java method if this method is from a proxy class. The compiler
+ // and the JIT code cache do not expect methods from proxy classes.
+ method = method->GetInterfaceMethodIfProxy(kRuntimePointerSize);
+
+ // Before allowing the jump, make sure no code is actively inspecting the method to avoid
+ // jumping from interpreter to OSR while e.g. single stepping. Note that we could selectively
+ // disable OSR when single stepping, but that's currently hard to know at this point.
+ if (Runtime::Current()->GetRuntimeCallbacks()->IsMethodBeingInspected(method)) {
+ return false;
+ }
+
+ ShadowFrame* shadow_frame = thread->GetManagedStack()->GetTopShadowFrame();
+ OsrData* osr_data = jit->PrepareForOsr(method,
+ dex_pc + dex_pc_offset,
+ shadow_frame->GetVRegArgs(0));
+
+ if (osr_data == nullptr) {
+ return false;
}
{
+ thread->PopShadowFrame();
ManagedStack fragment;
thread->PushManagedStackFragment(&fragment);
- (*art_quick_osr_stub)(memory,
- frame_size,
- native_pc,
+ (*art_quick_osr_stub)(osr_data->memory,
+ osr_data->frame_size,
+ osr_data->native_pc,
result,
- shorty,
+ method->GetShorty(),
thread);
if (UNLIKELY(thread->GetException() == Thread::GetDeoptimizationException())) {
@@ -596,9 +607,9 @@
}
thread->PopManagedStackFragment(fragment);
}
- free(memory);
+ free(osr_data);
thread->PushShadowFrame(shadow_frame);
- VLOG(jit) << "Done running OSR code for " << method_name;
+ VLOG(jit) << "Done running OSR code for " << method->PrettyMethod();
return true;
}
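
PrepareForOsr packs the header and the frame copy into one allocation via the trailing `memory[0]` member, so a single free releases both. A minimal standalone sketch of that flexible-array pattern (hypothetical `Blob`, not ART code):

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    struct Blob {
      size_t frame_size;
      void* memory[0];  // Trailing storage; GNU zero-length array extension.
    };

    // One malloc covers the header plus frame_size bytes of frame data,
    // mirroring PrepareForOsr's osr_data handling.
    Blob* AllocBlob(size_t frame_size) {
      Blob* blob = static_cast<Blob*>(malloc(sizeof(Blob) + frame_size));
      if (blob == nullptr) {
        return nullptr;
      }
      memset(blob, 0, sizeof(Blob) + frame_size);
      blob->frame_size = frame_size;
      return blob;
    }
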
diff --git a/runtime/jit/jit.h b/runtime/jit/jit.h
index 08a464e..8d5676b 100644
--- a/runtime/jit/jit.h
+++ b/runtime/jit/jit.h
@@ -25,6 +25,7 @@
#include "base/runtime_debug.h"
#include "base/timing_logger.h"
#include "handle.h"
+#include "offsets.h"
#include "jit/debugger_interface.h"
#include "jit/profile_saver_options.h"
#include "obj_ptr.h"
@@ -201,6 +202,30 @@
/*out*/ size_t* num_symbols) = 0;
};
+// Data structure holding information to perform an OSR.
+struct OsrData {
+ // The native PC to jump to.
+ const uint8_t* native_pc;
+
+ // The frame size of the compiled code to jump to.
+ size_t frame_size;
+
+ // The dynamically allocated memory of size `frame_size` to copy to the stack.
+ void* memory[0];
+
+ static constexpr MemberOffset NativePcOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(OsrData, native_pc));
+ }
+
+ static constexpr MemberOffset FrameSizeOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(OsrData, frame_size));
+ }
+
+ static constexpr MemberOffset MemoryOffset() {
+ return MemberOffset(OFFSETOF_MEMBER(OsrData, memory));
+ }
+};
+
class Jit {
public:
static constexpr size_t kDefaultPriorityThreadWeightRatio = 1000;
@@ -324,6 +349,11 @@
// Return whether the runtime should use a priority thread weight when sampling.
static bool ShouldUsePriorityThreadWeight(Thread* self);
+ // Return the information required to do an OSR jump. Return null if the
+ // OSR jump cannot be done.
+ OsrData* PrepareForOsr(ArtMethod* method, uint32_t dex_pc, uint32_t* vregs)
+ REQUIRES_SHARED(Locks::mutator_lock_);
+
// If an OSR compiled version is available for `method`,
// and `dex_pc + dex_pc_offset` is an entry point of that compiled
// version, this method will jump to the compiled code, let it run,
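
The MemberOffset accessors exist so generated assembly constants track the struct layout. As an illustration (not part of this change), a consumer could pin the expected layout with static_asserts against a hypothetical mirror struct:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical mirror of OsrData, used only to show what the offset
    // accessors encode: native_pc first, frame_size after the pointer,
    // and memory immediately after that.
    struct OsrDataMirror {
      const uint8_t* native_pc;
      size_t frame_size;
      void* memory[0];  // GNU zero-length array, as in OsrData.
    };

    static_assert(offsetof(OsrDataMirror, native_pc) == 0,
                  "unexpected layout");
    static_assert(offsetof(OsrDataMirror, frame_size) == sizeof(const uint8_t*),
                  "unexpected layout");
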
diff --git a/tools/cpp-define-generator/asm_defines.def b/tools/cpp-define-generator/asm_defines.def
index 9747844..a64676f 100644
--- a/tools/cpp-define-generator/asm_defines.def
+++ b/tools/cpp-define-generator/asm_defines.def
@@ -28,6 +28,7 @@
#include "mirror_dex_cache.def"
#include "mirror_object.def"
#include "mirror_string.def"
+#include "osr.def"
#include "profiling_info.def"
#include "rosalloc.def"
#include "runtime.def"
diff --git a/tools/cpp-define-generator/osr.def b/tools/cpp-define-generator/osr.def
new file mode 100644
index 0000000..bf611fd
--- /dev/null
+++ b/tools/cpp-define-generator/osr.def
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2019 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if ASM_DEFINE_INCLUDE_DEPENDENCIES
+#include "jit/jit.h"
+#endif
+
+ASM_DEFINE(OSR_DATA_NATIVE_PC, art::jit::OsrData::NativePcOffset().Int32Value())
+ASM_DEFINE(OSR_DATA_FRAME_SIZE, art::jit::OsrData::FrameSizeOffset().Int32Value())
+ASM_DEFINE(OSR_DATA_MEMORY, art::jit::OsrData::MemoryOffset().Int32Value())
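
These defines surface the OsrData layout to assembly, so nterp can, for example, load the jump target through `OSR_DATA_NATIVE_PC`. As a simplified model of the generator pipeline (the real cpp-define-generator extracts the values from compiled code rather than printing them at runtime; the values below are placeholders):

    #include <cstdio>

    // Simplified model only: expand each ASM_DEFINE into a "#define"
    // line that assembly sources can include.
    #define ASM_DEFINE(name, expr) \
      std::printf("#define %s %ld\n", #name, static_cast<long>(expr));

    int main() {
      // Placeholder values; the real generator evaluates the osr.def
      // expressions against the actual art::jit::OsrData layout.
      ASM_DEFINE(OSR_DATA_NATIVE_PC, 0)
      ASM_DEFINE(OSR_DATA_FRAME_SIZE, sizeof(void*))
      ASM_DEFINE(OSR_DATA_MEMORY, 2 * sizeof(void*))
    }
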