Portable refactorings.

Separate the quick entrypoints from the portable entrypoints, and move
architecture-specific dependencies into arch/.
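
Call sites that target quick-compiled code now use QUICK_ENTRYPOINT_OFFSET,
while the portable (LLVM-based) paths use PORTABLE_ENTRYPOINT_OFFSET. As a
rough sketch of the intent (illustrative only: the field names, struct
layouts, and the OFFSETOF_MEMBER helper below are assumptions rather than
the literal definitions in this change):

  // Hypothetical sketch: Thread carries one entrypoint table per ABI,
  // and each macro computes an offset into the matching table.
  struct PortableEntryPoints {
    void* pPortableResolutionTrampolineFromCode;
    // ...
  };
  struct QuickEntryPoints {
    void* pDeliverException;
    void* pLockObjectFromCode;
    // ... one slot per quick runtime helper
  };
  #define PORTABLE_ENTRYPOINT_OFFSET(x) \
      (OFFSETOF_MEMBER(Thread, portable_entrypoints_) + \
       OFFSETOF_MEMBER(PortableEntryPoints, x))
  #define QUICK_ENTRYPOINT_OFFSET(x) \
      (OFFSETOF_MEMBER(Thread, quick_entrypoints_) + \
       OFFSETOF_MEMBER(QuickEntryPoints, x))

With separate tables, a helper used by both compilers may live at a
different offset in each, so every load of a helper address must name the
ABI it expects, as the hunks below do.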
Change-Id: I9adbc0a9782e2959fdc3308215f01e3107632b7c
diff --git a/compiler/dex/quick/arm/call_arm.cc b/compiler/dex/quick/arm/call_arm.cc
index 7c3ec14..745e43d 100644
--- a/compiler/dex/quick/arm/call_arm.cc
+++ b/compiler/dex/quick/arm/call_arm.cc
@@ -432,7 +432,7 @@
// Making a call - use explicit registers
FlushAllRegs(); /* Everything to home location */
LoadValueDirectFixed(rl_src, r0);
- LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
+ LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode),
rARM_LR);
// Materialize a pointer to the fill data image
NewLIR3(kThumb2Adr, r1, 0, reinterpret_cast<uintptr_t>(tab_rec));
@@ -488,7 +488,7 @@
OpRegImm(kOpCmp, r1, 0);
OpIT(kCondNe, "T");
// Go expensive route - artLockObjectFromCode(self, obj);
- LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
+ LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rARM_LR);
ClobberCalleeSave();
LIR* call_inst = OpReg(kOpBlx, rARM_LR);
MarkSafepointPC(call_inst);
@@ -519,7 +519,7 @@
OpIT(kCondEq, "EE");
StoreWordDisp(r0, mirror::Object::MonitorOffset().Int32Value(), r3);
// Go expensive route - UnlockObjectFromCode(obj);
- LoadWordDisp(rARM_SELF, ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
+ LoadWordDisp(rARM_SELF, QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rARM_LR);
ClobberCalleeSave();
LIR* call_inst = OpReg(kOpBlx, rARM_LR);
MarkSafepointPC(call_inst);
diff --git a/compiler/dex/quick/arm/fp_arm.cc b/compiler/dex/quick/arm/fp_arm.cc
index 1bb08c4..08d6778 100644
--- a/compiler/dex/quick/arm/fp_arm.cc
+++ b/compiler/dex/quick/arm/fp_arm.cc
@@ -49,7 +49,8 @@
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2,
+ false);
rl_result = GetReturn(true);
StoreValue(rl_dest, rl_result);
return;
@@ -91,7 +92,8 @@
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2,
+ false);
rl_result = GetReturnWide(true);
StoreValueWide(rl_dest, rl_result);
return;
@@ -140,16 +142,16 @@
op = kThumb2VcvtDI;
break;
case Instruction::LONG_TO_DOUBLE:
- GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
return;
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
return;
case Instruction::LONG_TO_FLOAT:
- GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
return;
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
return;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
@@ -315,7 +317,7 @@
branch = NewLIR2(kThumbBCond, 0, kArmCondEq);
ClobberCalleeSave();
LockCallTemps(); // Using fixed registers
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pSqrt));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pSqrt));
NewLIR3(kThumb2Fmrrd, r0, r1, S2d(rl_src.low_reg, rl_src.high_reg));
NewLIR1(kThumbBlxR, r_tgt);
NewLIR3(kThumb2Fmdrr, S2d(rl_result.low_reg, rl_result.high_reg), r0, r1);
diff --git a/compiler/dex/quick/arm/int_arm.cc b/compiler/dex/quick/arm/int_arm.cc
index 4bb507b..9db1016 100644
--- a/compiler/dex/quick/arm/int_arm.cc
+++ b/compiler/dex/quick/arm/int_arm.cc
@@ -665,7 +665,7 @@
*/
RegLocation rl_result;
if (BadOverlap(rl_src1, rl_dest) || (BadOverlap(rl_src2, rl_dest))) {
- int func_offset = ENTRYPOINT_OFFSET(pLmul);
+ int func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
FlushAllRegs();
CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
rl_result = GetReturnWide(false);
@@ -956,7 +956,7 @@
// Get the array's class.
LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
r_array_class, true);
// Redo LoadValues in case they didn't survive the call.
LoadValueDirectFixed(rl_array, r_array); // Reload array
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 8934340..ebe10bb 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -211,9 +211,9 @@
int func_offset;
if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
type_idx)) {
- func_offset = ENTRYPOINT_OFFSET(pAllocArrayFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCode);
} else {
- func_offset= ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocArrayFromCodeWithAccessCheck);
}
CallRuntimeHelperImmMethodRegLocation(func_offset, type_idx, rl_src, true);
RegLocation rl_result = GetReturn(false);
@@ -233,9 +233,9 @@
int func_offset;
if (cu_->compiler_driver->CanAccessTypeWithoutChecks(cu_->method_idx, *cu_->dex_file,
type_idx)) {
- func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCode);
} else {
- func_offset = ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck);
}
CallRuntimeHelperImmMethodImm(func_offset, type_idx, elems, true);
FreeTemp(TargetReg(kArg2));
@@ -375,7 +375,7 @@
// TUNING: fast path should fall through
LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
LoadConstant(TargetReg(kArg0), ssb_index);
- CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
if (cu_->instruction_set == kMips) {
// For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy
OpRegCopy(rBase, TargetReg(kRet0));
@@ -408,9 +408,9 @@
FreeTemp(rBase);
} else {
FlushAllRegs(); // Everything to home locations
- int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Static) :
- (is_object ? ENTRYPOINT_OFFSET(pSetObjStatic)
- : ENTRYPOINT_OFFSET(pSet32Static));
+ int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Static) :
+ (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjStatic)
+ : QUICK_ENTRYPOINT_OFFSET(pSet32Static));
CallRuntimeHelperImmRegLocation(setter_offset, field_idx, rl_src, true);
}
}
@@ -455,7 +455,7 @@
// or NULL if not initialized. Check for NULL and call helper if NULL.
// TUNING: fast path should fall through
LIR* branch_over = OpCmpImmBranch(kCondNe, rBase, 0, NULL);
- CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeStaticStorage), ssb_index, true);
if (cu_->instruction_set == kMips) {
// For Arm, kRet0 = kArg0 = rBase, for Mips, we need to copy
OpRegCopy(rBase, TargetReg(kRet0));
@@ -483,9 +483,9 @@
}
} else {
FlushAllRegs(); // Everything to home locations
- int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Static) :
- (is_object ? ENTRYPOINT_OFFSET(pGetObjStatic)
- : ENTRYPOINT_OFFSET(pGet32Static));
+ int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Static) :
+ (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjStatic)
+ : QUICK_ENTRYPOINT_OFFSET(pGet32Static));
CallRuntimeHelperImm(getterOffset, field_idx, true);
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(rl_dest.fp);
@@ -499,7 +499,7 @@
void Mir2Lir::HandleSuspendLaunchPads() {
int num_elems = suspend_launchpads_.Size();
- int helper_offset = ENTRYPOINT_OFFSET(pTestSuspendFromCode);
+ int helper_offset = QUICK_ENTRYPOINT_OFFSET(pTestSuspendFromCode);
for (int i = 0; i < num_elems; i++) {
ResetRegPool();
ResetDefTracking();
@@ -545,7 +545,7 @@
bool target_x86 = (cu_->instruction_set == kX86);
switch (lab->operands[0]) {
case kThrowNullPointer:
- func_offset = ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowNullPointerFromCode);
break;
case kThrowConstantArrayBounds: // v1 is length reg (for Arm/Mips), v2 constant index
// v1 holds the constant array index. Mips/Arm uses v2 for length, x86 reloads.
@@ -557,7 +557,7 @@
// Make sure the following LoadConstant doesn't mess with kArg1.
LockTemp(TargetReg(kArg1));
LoadConstant(TargetReg(kArg0), v2);
- func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
break;
case kThrowArrayBounds:
// Move v1 (array index) to kArg0 and v2 (array length) to kArg1
@@ -590,18 +590,18 @@
OpRegCopy(TargetReg(kArg0), v1);
}
}
- func_offset = ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowArrayBoundsFromCode);
break;
case kThrowDivZero:
- func_offset = ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowDivZeroFromCode);
break;
case kThrowNoSuchMethod:
OpRegCopy(TargetReg(kArg0), v1);
func_offset =
- ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
+ QUICK_ENTRYPOINT_OFFSET(pThrowNoSuchMethodFromCode);
break;
case kThrowStackOverflow:
- func_offset = ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pThrowStackOverflowFromCode);
// Restore stack alignment
if (target_x86) {
OpRegImm(kOpAdd, TargetReg(kSp), frame_size_);
@@ -664,9 +664,9 @@
StoreValue(rl_dest, rl_result);
}
} else {
- int getterOffset = is_long_or_double ? ENTRYPOINT_OFFSET(pGet64Instance) :
- (is_object ? ENTRYPOINT_OFFSET(pGetObjInstance)
- : ENTRYPOINT_OFFSET(pGet32Instance));
+ int getterOffset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pGet64Instance) :
+ (is_object ? QUICK_ENTRYPOINT_OFFSET(pGetObjInstance)
+ : QUICK_ENTRYPOINT_OFFSET(pGet32Instance));
CallRuntimeHelperImmRegLocation(getterOffset, field_idx, rl_obj, true);
if (is_long_or_double) {
RegLocation rl_result = GetReturnWide(rl_dest.fp);
@@ -719,9 +719,9 @@
}
}
} else {
- int setter_offset = is_long_or_double ? ENTRYPOINT_OFFSET(pSet64Instance) :
- (is_object ? ENTRYPOINT_OFFSET(pSetObjInstance)
- : ENTRYPOINT_OFFSET(pSet32Instance));
+ int setter_offset = is_long_or_double ? QUICK_ENTRYPOINT_OFFSET(pSet64Instance) :
+ (is_object ? QUICK_ENTRYPOINT_OFFSET(pSetObjInstance)
+ : QUICK_ENTRYPOINT_OFFSET(pSet32Instance));
CallRuntimeHelperImmRegLocationRegLocation(setter_offset, field_idx, rl_obj, rl_src, true);
}
}
@@ -735,7 +735,7 @@
type_idx)) {
// Call out to helper which resolves type and verifies access.
// Resolved type returned in kRet0.
- CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
type_idx, rl_method.low_reg, true);
RegLocation rl_result = GetReturn(false);
StoreValue(rl_dest, rl_result);
@@ -764,7 +764,7 @@
// TUNING: move slow path to end & remove unconditional branch
LIR* target1 = NewLIR0(kPseudoTargetLabel);
// Call out to helper, which will return resolved type in kArg0
- CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
+ CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
rl_method.low_reg, true);
RegLocation rl_result = GetReturn(false);
StoreValue(rl_dest, rl_result);
@@ -797,7 +797,7 @@
LoadWordDisp(TargetReg(kArg2),
mirror::AbstractMethod::DexCacheStringsOffset().Int32Value(), TargetReg(kArg0));
// Might call out to helper, which will return resolved string in kRet0
- int r_tgt = CallHelperSetup(ENTRYPOINT_OFFSET(pResolveStringFromCode));
+ int r_tgt = CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode));
LoadWordDisp(TargetReg(kArg0), offset_of_string, TargetReg(kRet0));
LoadConstant(TargetReg(kArg1), string_idx);
if (cu_->instruction_set == kThumb2) {
@@ -821,7 +821,8 @@
branch->target = target;
} else {
DCHECK_EQ(cu_->instruction_set, kX86);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2), TargetReg(kArg1), true);
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pResolveStringFromCode), TargetReg(kArg2),
+ TargetReg(kArg1), true);
}
GenBarrier();
StoreValue(rl_dest, GetReturn(false));
@@ -847,9 +848,9 @@
int func_offset;
if (cu_->compiler_driver->CanAccessInstantiableTypeWithoutChecks(
cu_->method_idx, *cu_->dex_file, type_idx)) {
- func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCode);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCode);
} else {
- func_offset = ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pAllocObjectFromCodeWithAccessCheck);
}
CallRuntimeHelperImmMethod(func_offset, type_idx, true);
RegLocation rl_result = GetReturn(false);
@@ -858,7 +859,7 @@
void Mir2Lir::GenThrow(RegLocation rl_src) {
FlushAllRegs();
- CallRuntimeHelperRegLocation(ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
+ CallRuntimeHelperRegLocation(QUICK_ENTRYPOINT_OFFSET(pDeliverException), rl_src, true);
}
// For final classes there are no sub-classes to check and so we can answer the instance-of
@@ -928,7 +929,7 @@
if (needs_access_check) {
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kArg0
- CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
type_idx, true);
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); // kArg0 <= ref
@@ -950,7 +951,7 @@
LIR* hop_branch = OpCmpImmBranch(kCondNe, class_reg, 0, NULL);
// Not resolved
// Call out to helper, which will return resolved type in kRet0
- CallRuntimeHelperImm(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
+ CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, true);
OpRegCopy(TargetReg(kArg2), TargetReg(kRet0)); // Align usage with fast path
LoadValueDirectFixed(rl_src, TargetReg(kArg0)); /* reload Ref */
// Rejoin code paths
@@ -985,7 +986,7 @@
}
} else {
if (cu_->instruction_set == kThumb2) {
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
if (!type_known_abstract) {
/* Uses conditional nullification */
OpRegReg(kOpCmp, TargetReg(kArg1), TargetReg(kArg2)); // Same?
@@ -1002,13 +1003,13 @@
branchover = OpCmpBranch(kCondEq, TargetReg(kArg1), TargetReg(kArg2), NULL);
}
if (cu_->instruction_set != kX86) {
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2)); // .ne case - arg0 <= class
OpReg(kOpBlx, r_tgt); // .ne case: helper(class, ref->class)
FreeTemp(r_tgt);
} else {
OpRegCopy(TargetReg(kArg0), TargetReg(kArg2));
- OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
+ OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pInstanceofNonTrivialFromCode));
}
}
}
@@ -1068,7 +1069,7 @@
// Check we have access to type_idx and if not throw IllegalAccessError,
// returns Class* in kRet0
// InitializeTypeAndVerifyAccess(idx, method)
- CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
+ CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode),
type_idx, TargetReg(kArg1), true);
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
} else if (use_declaring_class) {
@@ -1088,8 +1089,8 @@
// Not resolved
// Call out to helper, which will return resolved type in kArg0
// InitializeTypeFromCode(idx, method)
- CallRuntimeHelperImmReg(ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx, TargetReg(kArg1),
- true);
+ CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(pInitializeTypeFromCode), type_idx,
+ TargetReg(kArg1), true);
OpRegCopy(class_reg, TargetReg(kRet0)); // Align usage with fast path
// Rejoin code paths
LIR* hop_target = NewLIR0(kPseudoTargetLabel);
@@ -1108,8 +1109,8 @@
if (!type_known_abstract) {
branch2 = OpCmpBranch(kCondEq, TargetReg(kArg1), class_reg, NULL);
}
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1), TargetReg(kArg2),
- true);
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCheckCastFromCode), TargetReg(kArg1),
+ TargetReg(kArg2), true);
/* branch target here */
LIR* target = NewLIR0(kPseudoTargetLabel);
branch1->target = target;
@@ -1172,15 +1173,15 @@
switch (opcode) {
case Instruction::SHL_LONG:
case Instruction::SHL_LONG_2ADDR:
- func_offset = ENTRYPOINT_OFFSET(pShlLong);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pShlLong);
break;
case Instruction::SHR_LONG:
case Instruction::SHR_LONG_2ADDR:
- func_offset = ENTRYPOINT_OFFSET(pShrLong);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pShrLong);
break;
case Instruction::USHR_LONG:
case Instruction::USHR_LONG_2ADDR:
- func_offset = ENTRYPOINT_OFFSET(pUshrLong);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pUshrLong);
break;
default:
LOG(FATAL) << "Unexpected case";
@@ -1302,7 +1303,7 @@
}
rl_result = GenDivRem(rl_dest, rl_src1.low_reg, rl_src2.low_reg, op == kOpDiv);
} else {
- int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
+ int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
FlushAllRegs(); /* Send everything to home location */
LoadValueDirectFixed(rl_src2, TargetReg(kArg1));
int r_tgt = CallHelperSetup(func_offset);
@@ -1557,7 +1558,7 @@
FlushAllRegs(); /* Everything to home location */
LoadValueDirectFixed(rl_src, TargetReg(kArg0));
Clobber(TargetReg(kArg0));
- int func_offset = ENTRYPOINT_OFFSET(pIdivmod);
+ int func_offset = QUICK_ENTRYPOINT_OFFSET(pIdivmod);
CallRuntimeHelperRegImm(func_offset, TargetReg(kArg0), lit, false);
if (is_div)
rl_result = GetReturn(false);
@@ -1634,7 +1635,7 @@
} else {
call_out = true;
ret_reg = TargetReg(kRet0);
- func_offset = ENTRYPOINT_OFFSET(pLmul);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pLmul);
}
break;
case Instruction::DIV_LONG:
@@ -1642,13 +1643,13 @@
call_out = true;
check_zero = true;
ret_reg = TargetReg(kRet0);
- func_offset = ENTRYPOINT_OFFSET(pLdiv);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pLdiv);
break;
case Instruction::REM_LONG:
case Instruction::REM_LONG_2ADDR:
call_out = true;
check_zero = true;
- func_offset = ENTRYPOINT_OFFSET(pLdivmod);
+ func_offset = QUICK_ENTRYPOINT_OFFSET(pLdivmod);
/* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
ret_reg = (cu_->instruction_set == kThumb2) ? TargetReg(kArg2) : TargetReg(kRet0);
break;
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 91f2500..1b34e99 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -471,7 +471,7 @@
direct_method = 0;
}
int trampoline = (cu->instruction_set == kX86) ? 0
- : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
+ : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline);
if (direct_method != 0) {
switch (state) {
@@ -555,7 +555,7 @@
uint32_t method_idx,
uintptr_t unused, uintptr_t unused2,
InvokeType unused3) {
- int trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
+ int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -563,7 +563,7 @@
const MethodReference& target_method,
uint32_t method_idx, uintptr_t unused,
uintptr_t unused2, InvokeType unused3) {
- int trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
+ int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -571,7 +571,7 @@
const MethodReference& target_method,
uint32_t method_idx, uintptr_t unused,
uintptr_t unused2, InvokeType unused3) {
- int trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
+ int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -579,7 +579,7 @@
const MethodReference& target_method,
uint32_t method_idx, uintptr_t unused,
uintptr_t unused2, InvokeType unused3) {
- int trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
+ int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -589,7 +589,7 @@
uint32_t unused,
uintptr_t unused2, uintptr_t unused3,
InvokeType unused4) {
- int trampoline = ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
+ int trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
return NextInvokeInsnSP(cu, info, trampoline, state, target_method, 0);
}
@@ -773,14 +773,14 @@
// Generate memcpy
OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
- CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
+ CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
} else {
if (info->num_arg_words >= 20) {
// Generate memcpy
OpRegRegImm(kOpAdd, TargetReg(kArg0), TargetReg(kSp), outs_offset);
OpRegRegImm(kOpAdd, TargetReg(kArg1), TargetReg(kSp), start_offset);
- CallRuntimeHelperRegRegImm(ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
+ CallRuntimeHelperRegRegImm(QUICK_ENTRYPOINT_OFFSET(pMemcpy), TargetReg(kArg0),
TargetReg(kArg1), (info->num_arg_words - 3) * 4, false);
} else {
// Use vldm/vstm pair using kArg3 as a temp
@@ -1047,7 +1047,7 @@
} else {
LoadValueDirectFixed(rl_start, reg_start);
}
- int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(ENTRYPOINT_OFFSET(pIndexOf)) : 0;
+ int r_tgt = (cu_->instruction_set != kX86) ? LoadHelper(QUICK_ENTRYPOINT_OFFSET(pIndexOf)) : 0;
GenNullCheck(rl_obj.s_reg_low, reg_ptr, info->opt_flags);
LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
intrinsic_launchpads_.Insert(launch_pad);
@@ -1056,7 +1056,7 @@
if (cu_->instruction_set != kX86) {
OpReg(kOpBlx, r_tgt);
} else {
- OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pIndexOf));
+ OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pIndexOf));
}
LIR* resume_tgt = NewLIR0(kPseudoTargetLabel);
launch_pad->operands[2] = reinterpret_cast<uintptr_t>(resume_tgt);
@@ -1084,7 +1084,7 @@
LoadValueDirectFixed(rl_this, reg_this);
LoadValueDirectFixed(rl_cmp, reg_cmp);
int r_tgt = (cu_->instruction_set != kX86) ?
- LoadHelper(ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
+ LoadHelper(QUICK_ENTRYPOINT_OFFSET(pStringCompareTo)) : 0;
GenNullCheck(rl_this.s_reg_low, reg_this, info->opt_flags);
// TUNING: check if rl_cmp.s_reg_low is already null checked
LIR* launch_pad = RawLIR(0, kPseudoIntrinsicRetry, reinterpret_cast<uintptr_t>(info));
@@ -1094,7 +1094,7 @@
if (cu_->instruction_set != kX86) {
OpReg(kOpBlx, r_tgt);
} else {
- OpThreadMem(kOpBlx, ENTRYPOINT_OFFSET(pStringCompareTo));
+ OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(pStringCompareTo));
}
launch_pad->operands[2] = 0; // No return possible
// Record that we've already inlined & null checked
@@ -1409,20 +1409,20 @@
int trampoline = 0;
switch (info->type) {
case kInterface:
- trampoline = fast_path ? ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
- : ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
+ trampoline = fast_path ? QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampoline)
+ : QUICK_ENTRYPOINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck);
break;
case kDirect:
- trampoline = ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
+ trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck);
break;
case kStatic:
- trampoline = ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
+ trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck);
break;
case kSuper:
- trampoline = ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
+ trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck);
break;
case kVirtual:
- trampoline = ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
+ trampoline = QUICK_ENTRYPOINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck);
break;
default:
LOG(FATAL) << "Unexpected invoke type";
diff --git a/compiler/dex/quick/mips/call_mips.cc b/compiler/dex/quick/mips/call_mips.cc
index b6c200c..846c055 100644
--- a/compiler/dex/quick/mips/call_mips.cc
+++ b/compiler/dex/quick/mips/call_mips.cc
@@ -247,7 +247,7 @@
GenBarrier();
NewLIR0(kMipsCurrPC); // Really a jal to .+8
// Now, fill the branch delay slot with the helper load
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode));
GenBarrier(); // Scheduling barrier
// Construct BaseLabel and set up table base register
@@ -272,7 +272,7 @@
LockCallTemps(); // Prepare for explicit register usage
GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
// Go expensive route - artLockObjectFromCode(self, obj);
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pLockObjectFromCode));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode));
ClobberCalleeSave();
LIR* call_inst = OpReg(kOpBlx, r_tgt);
MarkSafepointPC(call_inst);
@@ -287,7 +287,7 @@
LockCallTemps(); // Prepare for explicit register usage
GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
// Go expensive route - UnlockObjectFromCode(obj);
- int r_tgt = LoadHelper(ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
+ int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode));
ClobberCalleeSave();
LIR* call_inst = OpReg(kOpBlx, r_tgt);
MarkSafepointPC(call_inst);
diff --git a/compiler/dex/quick/mips/fp_mips.cc b/compiler/dex/quick/mips/fp_mips.cc
index 620527e..3203017 100644
--- a/compiler/dex/quick/mips/fp_mips.cc
+++ b/compiler/dex/quick/mips/fp_mips.cc
@@ -50,7 +50,8 @@
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2,
+ false);
rl_result = GetReturn(true);
StoreValue(rl_dest, rl_result);
return;
@@ -92,7 +93,8 @@
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2,
+ false);
rl_result = GetReturnWide(true);
StoreValueWide(rl_dest, rl_result);
return;
@@ -133,22 +135,22 @@
op = kMipsFcvtdw;
break;
case Instruction::FLOAT_TO_INT:
- GenConversionCall(ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2iz), rl_dest, rl_src);
return;
case Instruction::DOUBLE_TO_INT:
- GenConversionCall(ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2iz), rl_dest, rl_src);
return;
case Instruction::LONG_TO_DOUBLE:
- GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
return;
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
return;
case Instruction::LONG_TO_FLOAT:
- GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
return;
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
return;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
@@ -178,18 +180,18 @@
switch (opcode) {
case Instruction::CMPL_FLOAT:
- offset = ENTRYPOINT_OFFSET(pCmplFloat);
+ offset = QUICK_ENTRYPOINT_OFFSET(pCmplFloat);
wide = false;
break;
case Instruction::CMPG_FLOAT:
- offset = ENTRYPOINT_OFFSET(pCmpgFloat);
+ offset = QUICK_ENTRYPOINT_OFFSET(pCmpgFloat);
wide = false;
break;
case Instruction::CMPL_DOUBLE:
- offset = ENTRYPOINT_OFFSET(pCmplDouble);
+ offset = QUICK_ENTRYPOINT_OFFSET(pCmplDouble);
break;
case Instruction::CMPG_DOUBLE:
- offset = ENTRYPOINT_OFFSET(pCmpgDouble);
+ offset = QUICK_ENTRYPOINT_OFFSET(pCmpgDouble);
break;
default:
LOG(FATAL) << "Unexpected opcode: " << opcode;
diff --git a/compiler/dex/quick/mips/int_mips.cc b/compiler/dex/quick/mips/int_mips.cc
index 7c8214b..bd044c6 100644
--- a/compiler/dex/quick/mips/int_mips.cc
+++ b/compiler/dex/quick/mips/int_mips.cc
@@ -579,7 +579,7 @@
// Get the array's class.
LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
r_array_class, true);
// Redo LoadValues in case they didn't survive the call.
LoadValueDirectFixed(rl_array, r_array); // Reload array
diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index d530a1c..1c395de 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -148,7 +148,7 @@
NewLIR1(kX86StartOfMethod, rX86_ARG2);
NewLIR2(kX86PcRelAdr, rX86_ARG1, reinterpret_cast<uintptr_t>(tab_rec));
NewLIR2(kX86Add32RR, rX86_ARG1, rX86_ARG2);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayDataFromCode), rX86_ARG0,
rX86_ARG1, true);
}
@@ -165,7 +165,7 @@
NewLIR3(kX86LockCmpxchgMR, rCX, mirror::Object::MonitorOffset().Int32Value(), rDX);
LIR* branch = NewLIR2(kX86Jcc8, 0, kX86CondEq);
// If lock is held, go the expensive route - artLockObjectFromCode(self, obj);
- CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
+ CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pLockObjectFromCode), rCX, true);
branch->target = NewLIR0(kPseudoTargetLabel);
}
@@ -185,7 +185,7 @@
LIR* branch2 = NewLIR1(kX86Jmp8, 0);
branch->target = NewLIR0(kPseudoTargetLabel);
// Otherwise, go the expensive route - UnlockObjectFromCode(obj);
- CallRuntimeHelperReg(ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
+ CallRuntimeHelperReg(QUICK_ENTRYPOINT_OFFSET(pUnlockObjectFromCode), rAX, true);
branch2->target = NewLIR0(kPseudoTargetLabel);
}
diff --git a/compiler/dex/quick/x86/fp_x86.cc b/compiler/dex/quick/x86/fp_x86.cc
index cc6f374..f736b5e 100644
--- a/compiler/dex/quick/x86/fp_x86.cc
+++ b/compiler/dex/quick/x86/fp_x86.cc
@@ -49,7 +49,8 @@
case Instruction::REM_FLOAT_2ADDR:
case Instruction::REM_FLOAT:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmodf), rl_src1, rl_src2,
+ false);
rl_result = GetReturn(true);
StoreValue(rl_dest, rl_result);
return;
@@ -99,7 +100,8 @@
case Instruction::REM_DOUBLE_2ADDR:
case Instruction::REM_DOUBLE:
FlushAllRegs(); // Send everything to home location
- CallRuntimeHelperRegLocationRegLocation(ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2, false);
+ CallRuntimeHelperRegLocationRegLocation(QUICK_ENTRYPOINT_OFFSET(pFmod), rl_src1, rl_src2,
+ false);
rl_result = GetReturnWide(true);
StoreValueWide(rl_dest, rl_result);
return;
@@ -196,17 +198,17 @@
return;
}
case Instruction::LONG_TO_DOUBLE:
- GenConversionCall(ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2d), rl_dest, rl_src);
return;
case Instruction::LONG_TO_FLOAT:
// TODO: inline by using memory as a 64-bit source. Be careful about promoted registers.
- GenConversionCall(ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pL2f), rl_dest, rl_src);
return;
case Instruction::FLOAT_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pF2l), rl_dest, rl_src);
return;
case Instruction::DOUBLE_TO_LONG:
- GenConversionCall(ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
+ GenConversionCall(QUICK_ENTRYPOINT_OFFSET(pD2l), rl_dest, rl_src);
return;
default:
LOG(INFO) << "Unexpected opcode: " << opcode;
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 3be24df..0b4b4be 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -532,7 +532,7 @@
// Get the array's class.
LoadWordDisp(r_array, mirror::Object::ClassOffset().Int32Value(), r_array_class);
- CallRuntimeHelperRegReg(ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
+ CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(pCanPutArrayElementFromCode), r_value,
r_array_class, true);
// Redo LoadValues in case they didn't survive the call.
LoadValueDirectFixed(rl_array, r_array); // Reload array
diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc
index aeadb54..b069fbd 100644
--- a/compiler/jni/quick/jni_compiler.cc
+++ b/compiler/jni/quick/jni_compiler.cc
@@ -172,8 +172,8 @@
// can occur. The result is the saved JNI local state that is restored by the exit call. We
// abuse the JNI calling convention here, that is guaranteed to support passing 2 pointer
// arguments.
- uintptr_t jni_start = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodStartSynchronized)
- : ENTRYPOINT_OFFSET(pJniMethodStart);
+ uintptr_t jni_start = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodStartSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(pJniMethodStart);
main_jni_conv->ResetIterator(FrameOffset(main_out_arg_size));
FrameOffset locked_object_sirt_offset(0);
if (is_synchronized) {
@@ -304,13 +304,13 @@
uintptr_t jni_end;
if (reference_return) {
// Pass result.
- jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized)
- : ENTRYPOINT_OFFSET(pJniMethodEndWithReference);
+ jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodEndWithReferenceSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(pJniMethodEndWithReference);
SetNativeParameter(jni_asm.get(), end_jni_conv.get(), end_jni_conv->ReturnRegister());
end_jni_conv->Next();
} else {
- jni_end = is_synchronized ? ENTRYPOINT_OFFSET(pJniMethodEndSynchronized)
- : ENTRYPOINT_OFFSET(pJniMethodEnd);
+ jni_end = is_synchronized ? QUICK_ENTRYPOINT_OFFSET(pJniMethodEndSynchronized)
+ : QUICK_ENTRYPOINT_OFFSET(pJniMethodEnd);
}
// Pass saved local reference state.
if (end_jni_conv->IsCurrentParamOnStack()) {
diff --git a/compiler/stubs/portable/stubs.cc b/compiler/stubs/portable/stubs.cc
index cee6847..def43e2 100644
--- a/compiler/stubs/portable/stubs.cc
+++ b/compiler/stubs/portable/stubs.cc
@@ -34,7 +34,8 @@
RegList save = (1 << R0) | (1 << R1) | (1 << R2) | (1 << R3) | (1 << LR);
__ PushList(save);
- __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
+ __ LoadFromOffset(kLoadWord, R12, TR,
+ PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
__ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3
__ mov(R2, ShifterOperand(SP)); // Pass sp for Method** callee_addr
__ IncreaseFrameSize(12); // 3 words of space for alignment
@@ -69,7 +70,7 @@
__ StoreToOffset(kStoreWord, A0, SP, 0);
__ LoadFromOffset(kLoadWord, T9, S1,
- ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
+ PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode));
__ Move(A3, S1); // Pass Thread::Current() in A3
__ Move(A2, SP); // Pass SP for Method** callee_addr
__ Jalr(T9); // Call to resolution trampoline (callee, receiver, callee_addr, Thread*)
@@ -112,7 +113,7 @@
__ pushl(ECX); // pass receiver
__ pushl(EAX); // pass called
// Call to resolve method.
- __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)),
+ __ Call(ThreadOffset(PORTABLE_ENTRYPOINT_OFFSET(pPortableResolutionTrampolineFromCode)),
X86ManagedRegister::FromCpuRegister(ECX));
__ leave();
diff --git a/compiler/stubs/quick/stubs.cc b/compiler/stubs/quick/stubs.cc
index 598481f..912f1c0 100644
--- a/compiler/stubs/quick/stubs.cc
+++ b/compiler/stubs/quick/stubs.cc
@@ -46,7 +46,7 @@
// TODO: enable when GetCalleeSaveMethod is available at stub generation time
// DCHECK_EQ(save, Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs)->GetCoreSpillMask());
__ PushList(save);
- __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
+ __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
__ mov(R3, ShifterOperand(TR)); // Pass Thread::Current() in R3
__ IncreaseFrameSize(8); // 2 words of space for alignment
__ mov(R2, ShifterOperand(SP)); // Pass SP
@@ -71,7 +71,7 @@
const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
- __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
+ __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
__ bkpt(0);
size_t cs = assembler->CodeSize();
@@ -85,7 +85,7 @@
const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
UniquePtr<ArmAssembler> assembler(static_cast<ArmAssembler*>(Assembler::Create(kArm)));
- __ LoadFromOffset(kLoadWord, PC, R0, ENTRYPOINT_OFFSET(pInterpreterToQuickEntry));
+ __ LoadFromOffset(kLoadWord, PC, R0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry));
__ bkpt(0);
size_t cs = assembler->CodeSize();
@@ -123,7 +123,7 @@
__ StoreToOffset(kStoreWord, A2, SP, 8);
__ StoreToOffset(kStoreWord, A1, SP, 4);
- __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
+ __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode));
__ Move(A3, S1); // Pass Thread::Current() in A3
__ Move(A2, SP); // Pass SP for Method** callee_addr
__ Jalr(T9); // Call to resolution trampoline (method_idx, receiver, sp, Thread*)
@@ -161,7 +161,7 @@
const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
- __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
+ __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
__ Jr(T9);
__ Break();
@@ -176,7 +176,7 @@
const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
UniquePtr<MipsAssembler> assembler(static_cast<MipsAssembler*>(Assembler::Create(kMips)));
- __ LoadFromOffset(kLoadWord, T9, A0, ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry));
+ __ LoadFromOffset(kLoadWord, T9, A0, QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry));
__ Jr(T9);
__ Break();
@@ -208,7 +208,7 @@
__ pushl(EAX); // pass Method*
// Call to resolve method.
- __ Call(ThreadOffset(ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)),
+ __ Call(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pQuickResolutionTrampolineFromCode)),
X86ManagedRegister::FromCpuRegister(ECX));
__ movl(EDI, EAX); // save code pointer in EDI
@@ -236,7 +236,7 @@
const std::vector<uint8_t>* CreateInterpreterToInterpreterEntry() {
UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
- __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry))));
+ __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToInterpreterEntry))));
size_t cs = assembler->CodeSize();
UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
@@ -249,7 +249,7 @@
const std::vector<uint8_t>* CreateInterpreterToQuickEntry() {
UniquePtr<X86Assembler> assembler(static_cast<X86Assembler*>(Assembler::Create(kX86)));
- __ fs()->jmp(Address::Absolute(ThreadOffset(ENTRYPOINT_OFFSET(pInterpreterToQuickEntry))));
+ __ fs()->jmp(Address::Absolute(ThreadOffset(QUICK_ENTRYPOINT_OFFSET(pInterpreterToQuickEntry))));
size_t cs = assembler->CodeSize();
UniquePtr<std::vector<uint8_t> > entry_stub(new std::vector<uint8_t>(cs));
diff --git a/compiler/utils/arm/assembler_arm.cc b/compiler/utils/arm/assembler_arm.cc
index 0778cd3..fa202c3 100644
--- a/compiler/utils/arm/assembler_arm.cc
+++ b/compiler/utils/arm/assembler_arm.cc
@@ -1884,7 +1884,7 @@
// Don't care about preserving R0 as this call won't return
__ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
// Set up call to Thread::Current()->pDeliverException
- __ LoadFromOffset(kLoadWord, R12, TR, ENTRYPOINT_OFFSET(pDeliverException));
+ __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(pDeliverException));
__ blx(R12);
// Call never returns
__ bkpt(0);
diff --git a/compiler/utils/mips/assembler_mips.cc b/compiler/utils/mips/assembler_mips.cc
index 58815da..931d7ab 100644
--- a/compiler/utils/mips/assembler_mips.cc
+++ b/compiler/utils/mips/assembler_mips.cc
@@ -988,7 +988,7 @@
// Don't care about preserving A0 as this call won't return
__ Move(A0, scratch_.AsCoreRegister());
// Set up call to Thread::Current()->pDeliverException
- __ LoadFromOffset(kLoadWord, T9, S1, ENTRYPOINT_OFFSET(pDeliverException));
+ __ LoadFromOffset(kLoadWord, T9, S1, QUICK_ENTRYPOINT_OFFSET(pDeliverException));
__ Jr(T9);
// Call never returns
__ Break();
diff --git a/compiler/utils/x86/assembler_x86.cc b/compiler/utils/x86/assembler_x86.cc
index 89bfeb5..9095180 100644
--- a/compiler/utils/x86/assembler_x86.cc
+++ b/compiler/utils/x86/assembler_x86.cc
@@ -1837,7 +1837,7 @@
}
// Pass exception as argument in EAX
__ fs()->movl(EAX, Address::Absolute(Thread::ExceptionOffset()));
- __ fs()->call(Address::Absolute(ENTRYPOINT_OFFSET(pDeliverException)));
+ __ fs()->call(Address::Absolute(QUICK_ENTRYPOINT_OFFSET(pDeliverException)));
// this call should never return
__ int3();
#undef __