More target-independence

Continue moving target-specific code from the Arm code
generator into the target-independent realm. This will be
done in multiple small steps.

In this CL, the focus is on unifying the LIR data structure and
various enums that don't really need to be target-specific. It
also creates two new shared source files: GenCommon.cc (to hold
top-level code generation functions) and GenInvoke.cc (which
is likely to be shared only by the Arm and Mips targets).

Also added is a makefile hack to build for Mips (which we'll
eventually remove once the compiler supports multiple targets
via the command line) and various minor cleanups.

Overall, this CL moves more than 3,000 lines of code from
target-dependent to target-independent.
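
To illustrate the pattern, here is one call site before and after
unification; the lines are taken verbatim from the genEntrySequence
hunk in the diff below, and the same substitution (ArmLIR -> LIR,
kArm* enum values -> shared k* values) is applied throughout:

    // Before: Arm-prefixed pseudo-opcodes, condition codes, throw kinds
    newLIR0(cUnit, kArmPseudoMethodEntry);
    genRegRegCheck(cUnit, kArmCondCc, rLR, r12, NULL,
                   kArmThrowStackOverflow);

    // After: shared enum values usable by any target
    newLIR0(cUnit, kPseudoMethodEntry);
    genRegRegCheck(cUnit, kCondCc, rLR, r12, NULL,
                   kThrowStackOverflow);
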
Change-Id: I431ca4ae728100ed7d0e9d83a966a3f789f731b1
diff --git a/src/compiler/codegen/arm/ArchFactory.cc b/src/compiler/codegen/arm/ArchFactory.cc
index c3de5ff..5f75ef8 100644
--- a/src/compiler/codegen/arm/ArchFactory.cc
+++ b/src/compiler/codegen/arm/ArchFactory.cc
@@ -22,1063 +22,17 @@
*
*/
-#include "oat_compilation_unit.h"
-
-#define SLOW_FIELD_PATH (cUnit->enableDebug & (1 << kDebugSlowFieldPath))
-#define SLOW_INVOKE_PATH (cUnit->enableDebug & (1 << kDebugSlowInvokePath))
-#define SLOW_STRING_PATH (cUnit->enableDebug & (1 << kDebugSlowStringPath))
-#define SLOW_TYPE_PATH (cUnit->enableDebug & (1 << kDebugSlowTypePath))
-#define EXERCISE_SLOWEST_FIELD_PATH (cUnit->enableDebug & \
- (1 << kDebugSlowestFieldPath))
-#define EXERCISE_SLOWEST_STRING_PATH (cUnit->enableDebug & \
- (1 << kDebugSlowestStringPath))
-#define EXERCISE_RESOLVE_METHOD (cUnit->enableDebug & \
- (1 << kDebugExerciseResolveMethod))
-
namespace art {
-STATIC void genDebuggerUpdate(CompilationUnit* cUnit, int32_t offset);
+void genDebuggerUpdate(CompilationUnit* cUnit, int32_t offset);
-/* Generate conditional branch instructions */
-STATIC ArmLIR* genConditionalBranch(CompilationUnit* cUnit,
- ArmConditionCode cond,
- ArmLIR* target)
+int loadHelper(CompilationUnit* cUnit, int offset)
{
- ArmLIR* branch = opCondBranch(cUnit, cond);
- branch->generic.target = (LIR*) target;
- return branch;
+ loadWordDisp(cUnit, rSELF, offset, rLR);
+ return rLR;
}
-/* Generate unconditional branch instructions */
-STATIC ArmLIR* genUnconditionalBranch(CompilationUnit* cUnit, ArmLIR* target)
-{
- ArmLIR* branch = opNone(cUnit, kOpUncondBr);
- branch->generic.target = (LIR*) target;
- return branch;
-}
-
-STATIC ArmLIR* callRuntimeHelper(CompilationUnit* cUnit, int reg)
-{
- oatClobberCalleeSave(cUnit);
- return opReg(cUnit, kOpBlx, reg);
-}
-
-/*
- * Mark garbage collection card. Skip if the value we're storing is null.
- */
-STATIC void markGCCard(CompilationUnit* cUnit, int valReg, int tgtAddrReg)
-{
- int regCardBase = oatAllocTemp(cUnit);
- int regCardNo = oatAllocTemp(cUnit);
- ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondEq, valReg, 0);
- loadWordDisp(cUnit, rSELF, Thread::CardTableOffset().Int32Value(),
- regCardBase);
- opRegRegImm(cUnit, kOpLsr, regCardNo, tgtAddrReg, GC_CARD_SHIFT);
- storeBaseIndexed(cUnit, regCardBase, regCardNo, regCardBase, 0,
- kUnsignedByte);
- ArmLIR* target = newLIR0(cUnit, kArmPseudoTargetLabel);
- target->defMask = ENCODE_ALL;
- branchOver->generic.target = (LIR*)target;
- oatFreeTemp(cUnit, regCardBase);
- oatFreeTemp(cUnit, regCardNo);
-}
-
-/*
- * Utiltiy to load the current Method*. Broken out
- * to allow easy change between placing the current Method* in a
- * dedicated register or its home location in the frame.
- */
-STATIC void loadCurrMethodDirect(CompilationUnit *cUnit, int rTgt)
-{
-#if defined(METHOD_IN_REG)
- genRegCopy(cUnit, rTgt, rMETHOD);
-#else
- loadWordDisp(cUnit, rSP, 0, rTgt);
-#endif
-}
-
-STATIC int loadCurrMethod(CompilationUnit *cUnit)
-{
-#if defined(METHOD_IN_REG)
- return rMETHOD;
-#else
- int mReg = oatAllocTemp(cUnit);
- loadCurrMethodDirect(cUnit, mReg);
- return mReg;
-#endif
-}
-
-STATIC ArmLIR* genCheck(CompilationUnit* cUnit, ArmConditionCode cCode,
- MIR* mir, ArmThrowKind kind)
-{
- ArmLIR* tgt = (ArmLIR*)oatNew(cUnit, sizeof(ArmLIR), true, kAllocLIR);
- tgt->opcode = kArmPseudoThrowTarget;
- tgt->operands[0] = kind;
- tgt->operands[1] = mir ? mir->offset : 0;
- ArmLIR* branch = genConditionalBranch(cUnit, cCode, tgt);
- // Remember branch target - will process later
- oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
- return branch;
-}
-
-STATIC ArmLIR* genImmedCheck(CompilationUnit* cUnit, ArmConditionCode cCode,
- int reg, int immVal, MIR* mir, ArmThrowKind kind)
-{
- ArmLIR* tgt = (ArmLIR*)oatNew(cUnit, sizeof(ArmLIR), true, kAllocLIR);
- tgt->opcode = kArmPseudoThrowTarget;
- tgt->operands[0] = kind;
- tgt->operands[1] = mir->offset;
- ArmLIR* branch;
- if (cCode == kArmCondAl) {
- branch = genUnconditionalBranch(cUnit, tgt);
- } else {
- branch = genCmpImmBranch(cUnit, cCode, reg, immVal);
- branch->generic.target = (LIR*)tgt;
- }
- // Remember branch target - will process later
- oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
- return branch;
-}
-
-/* Perform null-check on a register. */
-STATIC ArmLIR* genNullCheck(CompilationUnit* cUnit, int sReg, int mReg,
- MIR* mir)
-{
- if (!(cUnit->disableOpt & (1 << kNullCheckElimination)) &&
- mir->optimizationFlags & MIR_IGNORE_NULL_CHECK) {
- return NULL;
- }
- return genImmedCheck(cUnit, kArmCondEq, mReg, 0, mir, kArmThrowNullPointer);
-}
-
-/* Perform check on two registers */
-STATIC TGT_LIR* genRegRegCheck(CompilationUnit* cUnit, ArmConditionCode cCode,
- int reg1, int reg2, MIR* mir, ArmThrowKind kind)
-{
- ArmLIR* tgt = (ArmLIR*)oatNew(cUnit, sizeof(ArmLIR), true, kAllocLIR);
- tgt->opcode = kArmPseudoThrowTarget;
- tgt->operands[0] = kind;
- tgt->operands[1] = mir ? mir->offset : 0;
- tgt->operands[2] = reg1;
- tgt->operands[3] = reg2;
- opRegReg(cUnit, kOpCmp, reg1, reg2);
- ArmLIR* branch = genConditionalBranch(cUnit, cCode, tgt);
- // Remember branch target - will process later
- oatInsertGrowableList(cUnit, &cUnit->throwLaunchpads, (intptr_t)tgt);
- return branch;
-}
-
-/*
- * Let helper function take care of everything. Will call
- * Array::AllocFromCode(type_idx, method, count);
- * Note: AllocFromCode will handle checks for errNegativeArraySize.
- */
-STATIC void genNewArray(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
- RegLocation rlSrc)
-{
- oatFlushAllRegs(cUnit); /* Everything to home location */
- uint32_t type_idx = mir->dalvikInsn.vC;
- if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
- cUnit->dex_cache,
- *cUnit->dex_file,
- type_idx)) {
- loadWordDisp(cUnit, rSELF,
- OFFSETOF_MEMBER(Thread, pAllocArrayFromCode), rLR);
- } else {
- loadWordDisp(cUnit, rSELF,
- OFFSETOF_MEMBER(Thread, pAllocArrayFromCodeWithAccessCheck), rLR);
- }
- loadCurrMethodDirect(cUnit, r1); // arg1 <- Method*
- loadConstant(cUnit, r0, type_idx); // arg0 <- type_id
- loadValueDirectFixed(cUnit, rlSrc, r2); // arg2 <- count
- callRuntimeHelper(cUnit, rLR);
- RegLocation rlResult = oatGetReturn(cUnit);
- storeValue(cUnit, rlDest, rlResult);
-}
-
-/*
- * Similar to genNewArray, but with post-allocation initialization.
- * Verifier guarantees we're dealing with an array class. Current
- * code throws runtime exception "bad Filled array req" for 'D' and 'J'.
- * Current code also throws internal unimp if not 'L', '[' or 'I'.
- */
-STATIC void genFilledNewArray(CompilationUnit* cUnit, MIR* mir, bool isRange)
-{
- DecodedInstruction* dInsn = &mir->dalvikInsn;
- int elems = dInsn->vA;
- int typeId = dInsn->vB;
- oatFlushAllRegs(cUnit); /* Everything to home location */
- if (cUnit->compiler->CanAccessTypeWithoutChecks(cUnit->method_idx,
- cUnit->dex_cache,
- *cUnit->dex_file,
- typeId)) {
- loadWordDisp(cUnit, rSELF,
- OFFSETOF_MEMBER(Thread, pCheckAndAllocArrayFromCode), rLR);
- } else {
- loadWordDisp(cUnit, rSELF,
- OFFSETOF_MEMBER(Thread, pCheckAndAllocArrayFromCodeWithAccessCheck), rLR);
- }
- loadCurrMethodDirect(cUnit, r1); // arg1 <- Method*
- loadConstant(cUnit, r0, typeId); // arg0 <- type_id
- loadConstant(cUnit, r2, elems); // arg2 <- count
- callRuntimeHelper(cUnit, rLR);
- /*
- * NOTE: the implicit target for OP_FILLED_NEW_ARRAY is the
- * return region. Because AllocFromCode placed the new array
- * in r0, we'll just lock it into place. When debugger support is
- * added, it may be necessary to additionally copy all return
- * values to a home location in thread-local storage
- */
- oatLockTemp(cUnit, r0);
-
- // TODO: use the correct component size, currently all supported types share array alignment
- // with ints (see comment at head of function)
- size_t component_size = sizeof(int32_t);
-
- // Having a range of 0 is legal
- if (isRange && (dInsn->vA > 0)) {
- /*
- * Bit of ugliness here. We're going generate a mem copy loop
- * on the register range, but it is possible that some regs
- * in the range have been promoted. This is unlikely, but
- * before generating the copy, we'll just force a flush
- * of any regs in the source range that have been promoted to
- * home location.
- */
- for (unsigned int i = 0; i < dInsn->vA; i++) {
- RegLocation loc = oatUpdateLoc(cUnit,
- oatGetSrc(cUnit, mir, i));
- if (loc.location == kLocPhysReg) {
- storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, loc.sRegLow),
- loc.lowReg, kWord);
- }
- }
- /*
- * TUNING note: generated code here could be much improved, but
- * this is an uncommon operation and isn't especially performance
- * critical.
- */
- int rSrc = oatAllocTemp(cUnit);
- int rDst = oatAllocTemp(cUnit);
- int rIdx = oatAllocTemp(cUnit);
- int rVal = rLR; // Using a lot of temps, rLR is known free here
- // Set up source pointer
- RegLocation rlFirst = oatGetSrc(cUnit, mir, 0);
- opRegRegImm(cUnit, kOpAdd, rSrc, rSP,
- oatSRegOffset(cUnit, rlFirst.sRegLow));
- // Set up the target pointer
- opRegRegImm(cUnit, kOpAdd, rDst, r0,
- Array::DataOffset(component_size).Int32Value());
- // Set up the loop counter (known to be > 0)
- loadConstant(cUnit, rIdx, dInsn->vA - 1);
- // Generate the copy loop. Going backwards for convenience
- ArmLIR* target = newLIR0(cUnit, kArmPseudoTargetLabel);
- target->defMask = ENCODE_ALL;
- // Copy next element
- loadBaseIndexed(cUnit, rSrc, rIdx, rVal, 2, kWord);
- storeBaseIndexed(cUnit, rDst, rIdx, rVal, 2, kWord);
- // Use setflags encoding here
- newLIR3(cUnit, kThumb2SubsRRI12, rIdx, rIdx, 1);
- ArmLIR* branch = opCondBranch(cUnit, kArmCondGe);
- branch->generic.target = (LIR*)target;
- } else if (!isRange) {
- // TUNING: interleave
- for (unsigned int i = 0; i < dInsn->vA; i++) {
- RegLocation rlArg = loadValue(cUnit,
- oatGetSrc(cUnit, mir, i), kCoreReg);
- storeBaseDisp(cUnit, r0,
- Array::DataOffset(component_size).Int32Value() +
- i * 4, rlArg.lowReg, kWord);
- // If the loadValue caused a temp to be allocated, free it
- if (oatIsTemp(cUnit, rlArg.lowReg)) {
- oatFreeTemp(cUnit, rlArg.lowReg);
- }
- }
- }
-}
-
-STATIC void genSput(CompilationUnit* cUnit, MIR* mir, RegLocation rlSrc,
- bool isLongOrDouble, bool isObject)
-{
- int fieldOffset;
- int ssbIndex;
- bool isVolatile;
- bool isReferrersClass;
- uint32_t fieldIdx = mir->dalvikInsn.vB;
-
- OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
- *cUnit->dex_file, *cUnit->dex_cache, cUnit->code_item,
- cUnit->method_idx, cUnit->access_flags);
-
- bool fastPath =
- cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, &mUnit,
- fieldOffset, ssbIndex,
- isReferrersClass, isVolatile, true);
- if (fastPath && !SLOW_FIELD_PATH) {
- DCHECK_GE(fieldOffset, 0);
- int rBase;
- int rMethod;
- if (isReferrersClass) {
- // Fast path, static storage base is this method's class
- rMethod = loadCurrMethod(cUnit);
- rBase = oatAllocTemp(cUnit);
- loadWordDisp(cUnit, rMethod,
- Method::DeclaringClassOffset().Int32Value(), rBase);
- } else {
- // Medium path, static storage base in a different class which
- // requires checks that the other class is initialized.
- DCHECK_GE(ssbIndex, 0);
- // May do runtime call so everything to home locations.
- oatFlushAllRegs(cUnit);
- // Using fixed register to sync with possible call to runtime
- // support.
- rMethod = r1;
- oatLockTemp(cUnit, rMethod);
- loadCurrMethodDirect(cUnit, rMethod);
- rBase = r0;
- oatLockTemp(cUnit, rBase);
- loadWordDisp(cUnit, rMethod,
- Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
- rBase);
- loadWordDisp(cUnit, rBase,
- Array::DataOffset(sizeof(Object*)).Int32Value() + sizeof(int32_t*) *
- ssbIndex, rBase);
- // rBase now points at appropriate static storage base (Class*)
- // or NULL if not initialized. Check for NULL and call helper if NULL.
- // TUNING: fast path should fall through
- ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
- loadWordDisp(cUnit, rSELF,
- OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
- loadConstant(cUnit, r0, ssbIndex);
- callRuntimeHelper(cUnit, rLR);
- ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
- skipTarget->defMask = ENCODE_ALL;
- branchOver->generic.target = (LIR*)skipTarget;
- }
- // rBase now holds static storage base
- oatFreeTemp(cUnit, rMethod);
- if (isLongOrDouble) {
- rlSrc = oatGetSrcWide(cUnit, mir, 0, 1);
- rlSrc = loadValueWide(cUnit, rlSrc, kAnyReg);
- } else {
- rlSrc = oatGetSrc(cUnit, mir, 0);
- rlSrc = loadValue(cUnit, rlSrc, kAnyReg);
- }
- if (isVolatile) {
- oatGenMemBarrier(cUnit, kST);
- }
- if (isLongOrDouble) {
- storeBaseDispWide(cUnit, rBase, fieldOffset, rlSrc.lowReg,
- rlSrc.highReg);
- } else {
- storeWordDisp(cUnit, rBase, fieldOffset, rlSrc.lowReg);
- }
- if (isVolatile) {
- oatGenMemBarrier(cUnit, kSY);
- }
- if (isObject) {
- markGCCard(cUnit, rlSrc.lowReg, rBase);
- }
- oatFreeTemp(cUnit, rBase);
- } else {
- oatFlushAllRegs(cUnit); // Everything to home locations
- int setterOffset = isLongOrDouble ? OFFSETOF_MEMBER(Thread, pSet64Static) :
- (isObject ? OFFSETOF_MEMBER(Thread, pSetObjStatic)
- : OFFSETOF_MEMBER(Thread, pSet32Static));
- loadWordDisp(cUnit, rSELF, setterOffset, rLR);
- loadConstant(cUnit, r0, fieldIdx);
- if (isLongOrDouble) {
- loadValueDirectWideFixed(cUnit, rlSrc, r2, r3);
- } else {
- loadValueDirect(cUnit, rlSrc, r1);
- }
- callRuntimeHelper(cUnit, rLR);
- }
-}
-
-STATIC void genSget(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
- bool isLongOrDouble, bool isObject)
-{
- int fieldOffset;
- int ssbIndex;
- bool isVolatile;
- bool isReferrersClass;
- uint32_t fieldIdx = mir->dalvikInsn.vB;
-
- OatCompilationUnit mUnit(cUnit->class_loader, cUnit->class_linker,
- *cUnit->dex_file, *cUnit->dex_cache, cUnit->code_item,
- cUnit->method_idx, cUnit->access_flags);
-
- bool fastPath =
- cUnit->compiler->ComputeStaticFieldInfo(fieldIdx, &mUnit,
- fieldOffset, ssbIndex,
- isReferrersClass, isVolatile, false);
- if (fastPath && !SLOW_FIELD_PATH) {
- DCHECK_GE(fieldOffset, 0);
- int rBase;
- int rMethod;
- if (isReferrersClass) {
- // Fast path, static storage base is this method's class
- rMethod = loadCurrMethod(cUnit);
- rBase = oatAllocTemp(cUnit);
- loadWordDisp(cUnit, rMethod,
- Method::DeclaringClassOffset().Int32Value(), rBase);
- } else {
- // Medium path, static storage base in a different class which
- // requires checks that the other class is initialized
- DCHECK_GE(ssbIndex, 0);
- // May do runtime call so everything to home locations.
- oatFlushAllRegs(cUnit);
- // Using fixed register to sync with possible call to runtime
- // support
- rMethod = r1;
- oatLockTemp(cUnit, rMethod);
- loadCurrMethodDirect(cUnit, rMethod);
- rBase = r0;
- oatLockTemp(cUnit, rBase);
- loadWordDisp(cUnit, rMethod,
- Method::DexCacheInitializedStaticStorageOffset().Int32Value(),
- rBase);
- loadWordDisp(cUnit, rBase,
- Array::DataOffset(sizeof(Object*)).Int32Value() +
- sizeof(int32_t*) * ssbIndex,
- rBase);
- // rBase now points at appropriate static storage base (Class*)
- // or NULL if not initialized. Check for NULL and call helper if NULL.
- // TUNING: fast path should fall through
- ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rBase, 0);
- loadWordDisp(cUnit, rSELF,
- OFFSETOF_MEMBER(Thread, pInitializeStaticStorage), rLR);
- loadConstant(cUnit, r0, ssbIndex);
- callRuntimeHelper(cUnit, rLR);
- ArmLIR* skipTarget = newLIR0(cUnit, kArmPseudoTargetLabel);
- skipTarget->defMask = ENCODE_ALL;
- branchOver->generic.target = (LIR*)skipTarget;
- }
- // rBase now holds static storage base
- oatFreeTemp(cUnit, rMethod);
- rlDest = isLongOrDouble ? oatGetDestWide(cUnit, mir, 0, 1)
- : oatGetDest(cUnit, mir, 0);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kAnyReg, true);
- if (isVolatile) {
- oatGenMemBarrier(cUnit, kSY);
- }
- if (isLongOrDouble) {
- loadBaseDispWide(cUnit, NULL, rBase, fieldOffset, rlResult.lowReg,
- rlResult.highReg, INVALID_SREG);
- } else {
- loadWordDisp(cUnit, rBase, fieldOffset, rlResult.lowReg);
- }
- oatFreeTemp(cUnit, rBase);
- if (isLongOrDouble) {
- storeValueWide(cUnit, rlDest, rlResult);
- } else {
- storeValue(cUnit, rlDest, rlResult);
- }
- } else {
- oatFlushAllRegs(cUnit); // Everything to home locations
- int getterOffset = isLongOrDouble ? OFFSETOF_MEMBER(Thread, pGet64Static) :
- (isObject ? OFFSETOF_MEMBER(Thread, pGetObjStatic)
- : OFFSETOF_MEMBER(Thread, pGet32Static));
- loadWordDisp(cUnit, rSELF, getterOffset, rLR);
- loadConstant(cUnit, r0, fieldIdx);
- callRuntimeHelper(cUnit, rLR);
- if (isLongOrDouble) {
- RegLocation rlResult = oatGetReturnWide(cUnit);
- storeValueWide(cUnit, rlDest, rlResult);
- } else {
- RegLocation rlResult = oatGetReturn(cUnit);
- storeValue(cUnit, rlDest, rlResult);
- }
- }
-}
-
-typedef int (*NextCallInsn)(CompilationUnit*, MIR*, int, uint32_t dexIdx,
- uint32_t methodIdx);
-
-/*
- * Bit of a hack here - in leiu of a real scheduling pass,
- * emit the next instruction in static & direct invoke sequences.
- */
-STATIC int nextSDCallInsn(CompilationUnit* cUnit, MIR* mir,
- int state, uint32_t dexIdx, uint32_t unused)
-{
- switch(state) {
- case 0: // Get the current Method* [sets r0]
- loadCurrMethodDirect(cUnit, r0);
- break;
- case 1: // Get method->dex_cache_resolved_methods_
- loadWordDisp(cUnit, r0,
- Method::DexCacheResolvedMethodsOffset().Int32Value(),
- r0);
- break;
- case 2: // Grab target method*
- loadWordDisp(cUnit, r0,
- Array::DataOffset(sizeof(Object*)).Int32Value() + dexIdx * 4,
- r0);
- break;
- case 3: // Grab the code from the method*
- loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
- break;
- default:
- return -1;
- }
- return state + 1;
-}
-
-/*
- * Bit of a hack here - in leiu of a real scheduling pass,
- * emit the next instruction in a virtual invoke sequence.
- * We can use rLR as a temp prior to target address loading
- * Note also that we'll load the first argument ("this") into
- * r1 here rather than the standard loadArgRegs.
- */
-STATIC int nextVCallInsn(CompilationUnit* cUnit, MIR* mir,
- int state, uint32_t dexIdx, uint32_t methodIdx)
-{
- RegLocation rlArg;
- /*
- * This is the fast path in which the target virtual method is
- * fully resolved at compile time.
- */
- switch(state) {
- case 0: // Get "this" [set r1]
- rlArg = oatGetSrc(cUnit, mir, 0);
- loadValueDirectFixed(cUnit, rlArg, r1);
- break;
- case 1: // Is "this" null? [use r1]
- genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
- // get this->klass_ [use r1, set rLR]
- loadWordDisp(cUnit, r1, Object::ClassOffset().Int32Value(), rLR);
- break;
- case 2: // Get this->klass_->vtable [usr rLR, set rLR]
- loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
- break;
- case 3: // Get target method [use rLR, set r0]
- loadWordDisp(cUnit, rLR, (methodIdx * 4) +
- Array::DataOffset(sizeof(Object*)).Int32Value(), r0);
- break;
- case 4: // Get the target compiled code address [uses r0, sets rLR]
- loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
- break;
- default:
- return -1;
- }
- return state + 1;
-}
-
-/*
- * Interleave launch code for INVOKE_SUPER. See comments
- * for nextVCallIns.
- */
-STATIC int nextSuperCallInsn(CompilationUnit* cUnit, MIR* mir,
- int state, uint32_t dexIdx, uint32_t methodIdx)
-{
- /*
- * This is the fast path in which the target virtual method is
- * fully resolved at compile time. Note also that this path assumes
- * that the check to verify that the target method index falls
- * within the size of the super's vtable has been done at compile-time.
- */
- RegLocation rlArg;
- switch(state) {
- case 0: // Get current Method* [set r0]
- loadCurrMethodDirect(cUnit, r0);
- // Load "this" [set r1]
- rlArg = oatGetSrc(cUnit, mir, 0);
- loadValueDirectFixed(cUnit, rlArg, r1);
- // Get method->declaring_class_ [use r0, set rLR]
- loadWordDisp(cUnit, r0, Method::DeclaringClassOffset().Int32Value(),
- rLR);
- // Is "this" null? [use r1]
- genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
- break;
- case 1: // Get method->declaring_class_->super_class [usr rLR, set rLR]
- loadWordDisp(cUnit, rLR, Class::SuperClassOffset().Int32Value(),
- rLR);
- break;
- case 2: // Get ...->super_class_->vtable [u/s rLR]
- loadWordDisp(cUnit, rLR, Class::VTableOffset().Int32Value(), rLR);
- break;
- case 3: // Get target method [use rLR, set r0]
- loadWordDisp(cUnit, rLR, (methodIdx * 4) +
- Array::DataOffset(sizeof(Object*)).Int32Value(), r0);
- break;
- case 4: // Get the target compiled code address [uses r0, sets rLR]
- loadWordDisp(cUnit, r0, Method::GetCodeOffset().Int32Value(), rLR);
- break;
- default:
- return -1;
- }
- return state + 1;
-}
-
-STATIC int nextInvokeInsnSP(CompilationUnit* cUnit, MIR* mir, int trampoline,
- int state, uint32_t dexIdx, uint32_t methodIdx)
-{
- /*
- * This handles the case in which the base method is not fully
- * resolved at compile time, we bail to a runtime helper.
- */
- if (state == 0) {
- // Load trampoline target
- loadWordDisp(cUnit, rSELF, trampoline, rLR);
- // Load r0 with method index
- loadConstant(cUnit, r0, dexIdx);
- return 1;
- }
- return -1;
-}
-
-STATIC int nextStaticCallInsnSP(CompilationUnit* cUnit, MIR* mir,
- int state, uint32_t dexIdx, uint32_t methodIdx)
-{
- int trampoline = OFFSETOF_MEMBER(Thread, pInvokeStaticTrampolineWithAccessCheck);
- return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
-}
-
-STATIC int nextDirectCallInsnSP(CompilationUnit* cUnit, MIR* mir,
- int state, uint32_t dexIdx, uint32_t methodIdx)
-{
- int trampoline = OFFSETOF_MEMBER(Thread, pInvokeDirectTrampolineWithAccessCheck);
- return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
-}
-
-STATIC int nextSuperCallInsnSP(CompilationUnit* cUnit, MIR* mir,
- int state, uint32_t dexIdx, uint32_t methodIdx)
-{
- int trampoline = OFFSETOF_MEMBER(Thread, pInvokeSuperTrampolineWithAccessCheck);
- return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
-}
-
-STATIC int nextVCallInsnSP(CompilationUnit* cUnit, MIR* mir,
- int state, uint32_t dexIdx, uint32_t methodIdx)
-{
- int trampoline = OFFSETOF_MEMBER(Thread, pInvokeVirtualTrampolineWithAccessCheck);
- return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
-}
-
-/*
- * All invoke-interface calls bounce off of art_invoke_interface_trampoline,
- * which will locate the target and continue on via a tail call.
- */
-STATIC int nextInterfaceCallInsn(CompilationUnit* cUnit, MIR* mir,
- int state, uint32_t dexIdx, uint32_t unused)
-{
- int trampoline = OFFSETOF_MEMBER(Thread, pInvokeInterfaceTrampoline);
- return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
-}
-
-STATIC int nextInterfaceCallInsnWithAccessCheck(CompilationUnit* cUnit,
- MIR* mir, int state,
- uint32_t dexIdx,
- uint32_t unused)
-{
- int trampoline = OFFSETOF_MEMBER(Thread, pInvokeInterfaceTrampolineWithAccessCheck);
- return nextInvokeInsnSP(cUnit, mir, trampoline, state, dexIdx, 0);
-}
-
-STATIC int loadArgRegs(CompilationUnit* cUnit, MIR* mir,
- DecodedInstruction* dInsn, int callState,
- NextCallInsn nextCallInsn, uint32_t dexIdx,
- uint32_t methodIdx, bool skipThis)
-{
- int nextReg = r1;
- int nextArg = 0;
- if (skipThis) {
- nextReg++;
- nextArg++;
- }
- for (; (nextReg <= r3) && (nextArg < mir->ssaRep->numUses); nextReg++) {
- RegLocation rlArg = oatGetRawSrc(cUnit, mir, nextArg++);
- rlArg = oatUpdateRawLoc(cUnit, rlArg);
- if (rlArg.wide && (nextReg <= r2)) {
- loadValueDirectWideFixed(cUnit, rlArg, nextReg, nextReg + 1);
- nextReg++;
- nextArg++;
- } else {
- rlArg.wide = false;
- loadValueDirectFixed(cUnit, rlArg, nextReg);
- }
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
- }
- return callState;
-}
-
-/*
- * Load up to 5 arguments, the first three of which will be in
- * r1 .. r3. On entry r0 contains the current method pointer,
- * and as part of the load sequence, it must be replaced with
- * the target method pointer. Note, this may also be called
- * for "range" variants if the number of arguments is 5 or fewer.
- */
-STATIC int genDalvikArgsNoRange(CompilationUnit* cUnit, MIR* mir,
- DecodedInstruction* dInsn, int callState,
- ArmLIR** pcrLabel, NextCallInsn nextCallInsn,
- uint32_t dexIdx, uint32_t methodIdx,
- bool skipThis)
-{
- RegLocation rlArg;
-
- /* If no arguments, just return */
- if (dInsn->vA == 0)
- return callState;
-
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
-
- DCHECK_LE(dInsn->vA, 5U);
- if (dInsn->vA > 3) {
- uint32_t nextUse = 3;
- //Detect special case of wide arg spanning arg3/arg4
- RegLocation rlUse0 = oatGetRawSrc(cUnit, mir, 0);
- RegLocation rlUse1 = oatGetRawSrc(cUnit, mir, 1);
- RegLocation rlUse2 = oatGetRawSrc(cUnit, mir, 2);
- if (((!rlUse0.wide && !rlUse1.wide) || rlUse0.wide) &&
- rlUse2.wide) {
- int reg;
- // Wide spans, we need the 2nd half of uses[2].
- rlArg = oatUpdateLocWide(cUnit, rlUse2);
- if (rlArg.location == kLocPhysReg) {
- reg = rlArg.highReg;
- } else {
- // r2 & r3 can safely be used here
- reg = r3;
- loadWordDisp(cUnit, rSP,
- oatSRegOffset(cUnit, rlArg.sRegLow) + 4, reg);
- callState = nextCallInsn(cUnit, mir, callState, dexIdx,
- methodIdx);
- }
- storeBaseDisp(cUnit, rSP, (nextUse + 1) * 4, reg, kWord);
- storeBaseDisp(cUnit, rSP, 16 /* (3+1)*4 */, reg, kWord);
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
- nextUse++;
- }
- // Loop through the rest
- while (nextUse < dInsn->vA) {
- int lowReg;
- int highReg;
- rlArg = oatGetRawSrc(cUnit, mir, nextUse);
- rlArg = oatUpdateRawLoc(cUnit, rlArg);
- if (rlArg.location == kLocPhysReg) {
- lowReg = rlArg.lowReg;
- highReg = rlArg.highReg;
- } else {
- lowReg = r2;
- highReg = r3;
- if (rlArg.wide) {
- loadValueDirectWideFixed(cUnit, rlArg, lowReg, highReg);
- } else {
- loadValueDirectFixed(cUnit, rlArg, lowReg);
- }
- callState = nextCallInsn(cUnit, mir, callState, dexIdx,
- methodIdx);
- }
- int outsOffset = (nextUse + 1) * 4;
- if (rlArg.wide) {
- storeBaseDispWide(cUnit, rSP, outsOffset, lowReg, highReg);
- nextUse += 2;
- } else {
- storeWordDisp(cUnit, rSP, outsOffset, lowReg);
- nextUse++;
- }
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
- }
- }
-
- callState = loadArgRegs(cUnit, mir, dInsn, callState, nextCallInsn,
- dexIdx, methodIdx, skipThis);
-
- if (pcrLabel) {
- *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
- }
- return callState;
-}
-
-/*
- * May have 0+ arguments (also used for jumbo). Note that
- * source virtual registers may be in physical registers, so may
- * need to be flushed to home location before copying. This
- * applies to arg3 and above (see below).
- *
- * Two general strategies:
- * If < 20 arguments
- * Pass args 3-18 using vldm/vstm block copy
- * Pass arg0, arg1 & arg2 in r1-r3
- * If 20+ arguments
- * Pass args arg19+ using memcpy block copy
- * Pass arg0, arg1 & arg2 in r1-r3
- *
- */
-STATIC int genDalvikArgsRange(CompilationUnit* cUnit, MIR* mir,
- DecodedInstruction* dInsn, int callState,
- ArmLIR** pcrLabel, NextCallInsn nextCallInsn,
- uint32_t dexIdx, uint32_t methodIdx,
- bool skipThis)
-{
- int firstArg = dInsn->vC;
- int numArgs = dInsn->vA;
-
- // If we can treat it as non-range (Jumbo ops will use range form)
- if (numArgs <= 5)
- return genDalvikArgsNoRange(cUnit, mir, dInsn, callState, pcrLabel,
- nextCallInsn, dexIdx, methodIdx,
- skipThis);
- /*
- * Make sure range list doesn't span the break between in normal
- * Dalvik vRegs and the ins.
- */
- int highestArg = oatGetSrc(cUnit, mir, numArgs-1).sRegLow;
- int boundaryReg = cUnit->numDalvikRegisters - cUnit->numIns;
- if ((firstArg < boundaryReg) && (highestArg >= boundaryReg)) {
- LOG(FATAL) << "Argument list spanned locals & args";
- }
-
- /*
- * First load the non-register arguments. Both forms expect all
- * of the source arguments to be in their home frame location, so
- * scan the sReg names and flush any that have been promoted to
- * frame backing storage.
- */
- // Scan the rest of the args - if in physReg flush to memory
- for (int nextArg = 0; nextArg < numArgs;) {
- RegLocation loc = oatGetRawSrc(cUnit, mir, nextArg);
- if (loc.wide) {
- loc = oatUpdateLocWide(cUnit, loc);
- if ((nextArg >= 2) && (loc.location == kLocPhysReg)) {
- storeBaseDispWide(cUnit, rSP,
- oatSRegOffset(cUnit, loc.sRegLow),
- loc.lowReg, loc.highReg);
- }
- nextArg += 2;
- } else {
- loc = oatUpdateLoc(cUnit, loc);
- if ((nextArg >= 3) && (loc.location == kLocPhysReg)) {
- storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, loc.sRegLow),
- loc.lowReg, kWord);
- }
- nextArg++;
- }
- }
-
- int startOffset = oatSRegOffset(cUnit,
- cUnit->regLocation[mir->ssaRep->uses[3]].sRegLow);
- int outsOffset = 4 /* Method* */ + (3 * 4);
- if (numArgs >= 20) {
- // Generate memcpy
- opRegRegImm(cUnit, kOpAdd, r0, rSP, outsOffset);
- opRegRegImm(cUnit, kOpAdd, r1, rSP, startOffset);
- loadWordDisp(cUnit, rSELF, OFFSETOF_MEMBER(Thread, pMemcpy), rLR);
- loadConstant(cUnit, r2, (numArgs - 3) * 4);
- callRuntimeHelper(cUnit, rLR);
- // Restore Method*
- loadCurrMethodDirect(cUnit, r0);
- } else {
- // Use vldm/vstm pair using r3 as a temp
- int regsLeft = std::min(numArgs - 3, 16);
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
- opRegRegImm(cUnit, kOpAdd, r3, rSP, startOffset);
- ArmLIR* ld = newLIR3(cUnit, kThumb2Vldms, r3, fr0, regsLeft);
- //TUNING: loosen barrier
- ld->defMask = ENCODE_ALL;
- setMemRefType(ld, true /* isLoad */, kDalvikReg);
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
- opRegRegImm(cUnit, kOpAdd, r3, rSP, 4 /* Method* */ + (3 * 4));
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
- ArmLIR* st = newLIR3(cUnit, kThumb2Vstms, r3, fr0, regsLeft);
- setMemRefType(st, false /* isLoad */, kDalvikReg);
- st->defMask = ENCODE_ALL;
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
- }
-
- callState = loadArgRegs(cUnit, mir, dInsn, callState, nextCallInsn,
- dexIdx, methodIdx, skipThis);
-
- callState = nextCallInsn(cUnit, mir, callState, dexIdx, methodIdx);
- if (pcrLabel) {
- *pcrLabel = genNullCheck(cUnit, oatSSASrc(mir,0), r1, mir);
- }
- return callState;
-}
-
-// Debugging routine - if null target, branch to DebugMe
-STATIC void genShowTarget(CompilationUnit* cUnit)
-{
- ArmLIR* branchOver = genCmpImmBranch(cUnit, kArmCondNe, rLR, 0);
- loadWordDisp(cUnit, rSELF,
- OFFSETOF_MEMBER(Thread, pDebugMe), rLR);
- ArmLIR* target = newLIR0(cUnit, kArmPseudoTargetLabel);
- target->defMask = -1;
- branchOver->generic.target = (LIR*)target;
-}
-
-STATIC void genThrowVerificationError(CompilationUnit* cUnit, MIR* mir)
-{
- loadWordDisp(cUnit, rSELF,
- OFFSETOF_MEMBER(Thread, pThrowVerificationErrorFromCode), rLR);
- loadConstant(cUnit, r0, mir->dalvikInsn.vA);
- loadConstant(cUnit, r1, mir->dalvikInsn.vB);
- callRuntimeHelper(cUnit, rLR);
-}
-
-STATIC void genCompareAndBranch(CompilationUnit* cUnit, BasicBlock* bb,
- MIR* mir, RegLocation rlSrc1,
- RegLocation rlSrc2, ArmLIR* labelList)
-{
- ArmConditionCode cond;
- rlSrc1 = loadValue(cUnit, rlSrc1, kCoreReg);
- rlSrc2 = loadValue(cUnit, rlSrc2, kCoreReg);
- opRegReg(cUnit, kOpCmp, rlSrc1.lowReg, rlSrc2.lowReg);
- Opcode opcode = mir->dalvikInsn.opcode;
- switch(opcode) {
- case OP_IF_EQ:
- cond = kArmCondEq;
- break;
- case OP_IF_NE:
- cond = kArmCondNe;
- break;
- case OP_IF_LT:
- cond = kArmCondLt;
- break;
- case OP_IF_GE:
- cond = kArmCondGe;
- break;
- case OP_IF_GT:
- cond = kArmCondGt;
- break;
- case OP_IF_LE:
- cond = kArmCondLe;
- break;
- default:
- cond = (ArmConditionCode)0;
- LOG(FATAL) << "Unexpected opcode " << (int)opcode;
- }
- genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
- genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
-}
-
-STATIC void genCompareZeroAndBranch(CompilationUnit* cUnit, BasicBlock* bb,
- MIR* mir, RegLocation rlSrc,
- ArmLIR* labelList)
-{
- ArmConditionCode cond;
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- opRegImm(cUnit, kOpCmp, rlSrc.lowReg, 0);
- Opcode opcode = mir->dalvikInsn.opcode;
- switch(opcode) {
- case OP_IF_EQZ:
- cond = kArmCondEq;
- break;
- case OP_IF_NEZ:
- cond = kArmCondNe;
- break;
- case OP_IF_LTZ:
- cond = kArmCondLt;
- break;
- case OP_IF_GEZ:
- cond = kArmCondGe;
- break;
- case OP_IF_GTZ:
- cond = kArmCondGt;
- break;
- case OP_IF_LEZ:
- cond = kArmCondLe;
- break;
- default:
- cond = (ArmConditionCode)0;
- LOG(FATAL) << "Unexpected opcode " << (int)opcode;
- }
- genConditionalBranch(cUnit, cond, &labelList[bb->taken->id]);
- genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
-}
-
-STATIC void genIntToLong(CompilationUnit* cUnit, MIR* mir, RegLocation rlDest,
- RegLocation rlSrc)
-{
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- if (rlSrc.location == kLocPhysReg) {
- genRegCopy(cUnit, rlResult.lowReg, rlSrc.lowReg);
- } else {
- loadValueDirect(cUnit, rlSrc, rlResult.lowReg);
- }
- opRegRegImm(cUnit, kOpAsr, rlResult.highReg,
- rlResult.lowReg, 31);
- storeValueWide(cUnit, rlDest, rlResult);
-}
-
-STATIC void genIntNarrowing(CompilationUnit* cUnit, MIR* mir,
- RegLocation rlDest, RegLocation rlSrc)
-{
- rlSrc = loadValue(cUnit, rlSrc, kCoreReg);
- RegLocation rlResult = oatEvalLoc(cUnit, rlDest, kCoreReg, true);
- OpKind op = kOpInvalid;
- switch(mir->dalvikInsn.opcode) {
- case OP_INT_TO_BYTE:
- op = kOp2Byte;
- break;
- case OP_INT_TO_SHORT:
- op = kOp2Short;
- break;
- case OP_INT_TO_CHAR:
- op = kOp2Char;
- break;
- default:
- LOG(ERROR) << "Bad int conversion type";
- }
- opRegReg(cUnit, op, rlResult.lowReg, rlSrc.lowReg);
- storeValue(cUnit, rlDest, rlResult);
-}
-
-/*
- * If there are any ins passed in registers that have not been promoted
- * to a callee-save register, flush them to the frame. Perform intial
- * assignment of promoted arguments.
- */
-STATIC void flushIns(CompilationUnit* cUnit)
-{
- if (cUnit->numIns == 0)
- return;
- int firstArgReg = r1;
- int lastArgReg = r3;
- int startVReg = cUnit->numDalvikRegisters - cUnit->numIns;
- /*
- * Arguments passed in registers should be flushed
- * to their backing locations in the frame for now.
- * Also, we need to do initial assignment for promoted
- * arguments. NOTE: an older version of dx had an issue
- * in which it would reuse static method argument registers.
- * This could result in the same Dalvik virtual register
- * being promoted to both core and fp regs. In those
- * cases, copy argument to both. This will be uncommon
- * enough that it isn't worth attempting to optimize.
- */
- for (int i = 0; i < cUnit->numIns; i++) {
- PromotionMap vMap = cUnit->promotionMap[startVReg + i];
- if (i <= (lastArgReg - firstArgReg)) {
- // If arriving in register
- if (vMap.coreLocation == kLocPhysReg) {
- genRegCopy(cUnit, vMap.coreReg, firstArgReg + i);
- }
- if (vMap.fpLocation == kLocPhysReg) {
- genRegCopy(cUnit, vMap.fpReg, firstArgReg + i);
- }
- // Also put a copy in memory in case we're partially promoted
- storeBaseDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
- firstArgReg + i, kWord);
- } else {
- // If arriving in frame & promoted
- if (vMap.coreLocation == kLocPhysReg) {
- loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
- vMap.coreReg);
- }
- if (vMap.fpLocation == kLocPhysReg) {
- loadWordDisp(cUnit, rSP, oatSRegOffset(cUnit, startVReg + i),
- vMap.fpReg);
- }
- }
- }
-}
-
-STATIC void genEntrySequence(CompilationUnit* cUnit, BasicBlock* bb)
+void genEntrySequence(CompilationUnit* cUnit, BasicBlock* bb)
{
int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
/*
@@ -1099,7 +53,7 @@
bool skipOverflowCheck = ((cUnit->attrs & METHOD_IS_LEAF) &&
((size_t)cUnit->frameSize <
Thread::kStackOverflowReservedBytes));
- newLIR0(cUnit, kArmPseudoMethodEntry);
+ newLIR0(cUnit, kPseudoMethodEntry);
if (!skipOverflowCheck) {
/* Load stack limit */
loadWordDisp(cUnit, rSELF,
@@ -1119,8 +73,8 @@
if (!skipOverflowCheck) {
opRegRegImm(cUnit, kOpSub, rLR, rSP,
cUnit->frameSize - (spillCount * 4));
- genRegRegCheck(cUnit, kArmCondCc, rLR, r12, NULL,
- kArmThrowStackOverflow);
+ genRegRegCheck(cUnit, kCondCc, rLR, r12, NULL,
+ kThrowStackOverflow);
genRegCopy(cUnit, rSP, rLR); // Establish stack
} else {
opRegImm(cUnit, kOpSub, rSP,
@@ -1142,7 +96,7 @@
oatFreeTemp(cUnit, r3);
}
-STATIC void genExitSequence(CompilationUnit* cUnit, BasicBlock* bb)
+void genExitSequence(CompilationUnit* cUnit, BasicBlock* bb)
{
int spillCount = cUnit->numCoreSpills + cUnit->numFPSpills;
/*
@@ -1152,7 +106,7 @@
oatLockTemp(cUnit, r0);
oatLockTemp(cUnit, r1);
- newLIR0(cUnit, kArmPseudoMethodExit);
+ newLIR0(cUnit, kPseudoMethodExit);
/* If we're compiling for the debugger, generate an update callout */
if (cUnit->genDebugger) {
genDebuggerUpdate(cUnit, DEBUGGER_METHOD_EXIT);
@@ -1181,16 +135,16 @@
*/
void removeRedundantBranches(CompilationUnit* cUnit)
{
- ArmLIR* thisLIR;
+ LIR* thisLIR;
- for (thisLIR = (ArmLIR*) cUnit->firstLIRInsn;
- thisLIR != (ArmLIR*) cUnit->lastLIRInsn;
+ for (thisLIR = (LIR*) cUnit->firstLIRInsn;
+ thisLIR != (LIR*) cUnit->lastLIRInsn;
thisLIR = NEXT_LIR(thisLIR)) {
/* Branch to the next instruction */
if ((thisLIR->opcode == kThumbBUncond) ||
(thisLIR->opcode == kThumb2BUncond)) {
- ArmLIR* nextLIR = thisLIR;
+ LIR* nextLIR = thisLIR;
while (true) {
nextLIR = NEXT_LIR(nextLIR);
@@ -1198,7 +152,7 @@
/*
* Is the branch target the next instruction?
*/
- if (nextLIR == (ArmLIR*) thisLIR->generic.target) {
+ if (nextLIR == (LIR*) thisLIR->target) {
thisLIR->flags.isNop = true;
break;
}
@@ -1209,109 +163,13 @@
* might be the last real instruction.
*/
if (!isPseudoOpcode(nextLIR->opcode) ||
- (nextLIR = (ArmLIR*) cUnit->lastLIRInsn))
+ (nextLIR == (LIR*) cUnit->lastLIRInsn))
break;
}
}
}
}
-STATIC void handleSuspendLaunchpads(CompilationUnit *cUnit)
-{
- ArmLIR** suspendLabel =
- (ArmLIR **) cUnit->suspendLaunchpads.elemList;
- int numElems = cUnit->suspendLaunchpads.numUsed;
-
- for (int i = 0; i < numElems; i++) {
- /* TUNING: move suspend count load into helper */
- ArmLIR* lab = suspendLabel[i];
- ArmLIR* resumeLab = (ArmLIR*)lab->operands[0];
- cUnit->currentDalvikOffset = lab->operands[1];
- oatAppendLIR(cUnit, (LIR *)lab);
- loadWordDisp(cUnit, rSELF,
- OFFSETOF_MEMBER(Thread, pTestSuspendFromCode), rLR);
- if (!cUnit->genDebugger) {
- // use rSUSPEND for suspend count
- loadWordDisp(cUnit, rSELF,
- Thread::SuspendCountOffset().Int32Value(), rSUSPEND);
- }
- opReg(cUnit, kOpBlx, rLR);
- if ( cUnit->genDebugger) {
- // use rSUSPEND for update debugger
- loadWordDisp(cUnit, rSELF,
- OFFSETOF_MEMBER(Thread, pUpdateDebuggerFromCode), rSUSPEND);
- }
- genUnconditionalBranch(cUnit, resumeLab);
- }
-}
-
-STATIC void handleThrowLaunchpads(CompilationUnit *cUnit)
-{
- ArmLIR** throwLabel =
- (ArmLIR **) cUnit->throwLaunchpads.elemList;
- int numElems = cUnit->throwLaunchpads.numUsed;
- int i;
-
- for (i = 0; i < numElems; i++) {
- ArmLIR* lab = throwLabel[i];
- cUnit->currentDalvikOffset = lab->operands[1];
- oatAppendLIR(cUnit, (LIR *)lab);
- int funcOffset = 0;
- int v1 = lab->operands[2];
- int v2 = lab->operands[3];
- switch(lab->operands[0]) {
- case kArmThrowNullPointer:
- funcOffset = OFFSETOF_MEMBER(Thread, pThrowNullPointerFromCode);
- break;
- case kArmThrowArrayBounds:
- if (v2 != r0) {
- genRegCopy(cUnit, r0, v1);
- genRegCopy(cUnit, r1, v2);
- } else {
- if (v1 == r1) {
- genRegCopy(cUnit, r12, v1);
- genRegCopy(cUnit, r1, v2);
- genRegCopy(cUnit, r0, r12);
- } else {
- genRegCopy(cUnit, r1, v2);
- genRegCopy(cUnit, r0, v1);
- }
- }
- funcOffset = OFFSETOF_MEMBER(Thread, pThrowArrayBoundsFromCode);
- break;
- case kArmThrowDivZero:
- funcOffset = OFFSETOF_MEMBER(Thread, pThrowDivZeroFromCode);
- break;
- case kArmThrowVerificationError:
- loadConstant(cUnit, r0, v1);
- loadConstant(cUnit, r1, v2);
- funcOffset =
- OFFSETOF_MEMBER(Thread, pThrowVerificationErrorFromCode);
- break;
- case kArmThrowNegArraySize:
- genRegCopy(cUnit, r0, v1);
- funcOffset =
- OFFSETOF_MEMBER(Thread, pThrowNegArraySizeFromCode);
- break;
- case kArmThrowNoSuchMethod:
- genRegCopy(cUnit, r0, v1);
- funcOffset =
- OFFSETOF_MEMBER(Thread, pThrowNoSuchMethodFromCode);
- break;
- case kArmThrowStackOverflow:
- funcOffset =
- OFFSETOF_MEMBER(Thread, pThrowStackOverflowFromCode);
- // Restore stack alignment
- opRegImm(cUnit, kOpAdd, rSP,
- (cUnit->numCoreSpills + cUnit->numFPSpills) * 4);
- break;
- default:
- LOG(FATAL) << "Unexpected throw kind: " << lab->operands[0];
- }
- loadWordDisp(cUnit, rSELF, funcOffset, rLR);
- callRuntimeHelper(cUnit, rLR);
- }
-}
/* Common initialization routine for an architecture family */
bool oatArchInit()
@@ -1328,11 +186,4 @@
return oatArchVariantInit();
}
-
-/* Needed by the Assembler */
-void oatSetupResourceMasks(ArmLIR* lir)
-{
- setupResourceMasks(lir);
-}
-
} // namespace art