Diffstat (limited to 'compiler/dex/quick/gen_common.cc')
-rw-r--r--  compiler/dex/quick/gen_common.cc  163
1 file changed, 84 insertions, 79 deletions
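What this change does in one picture: the quick codegen's TargetReg() used to take a bare bool ("wide?"), and reference-width registers needed a separate TargetRefReg() helper. The patch below folds both into one symbolic-width argument. The sketch is a toy stand-in, not the real Mir2Lir API -- the enum name WideKind and this RegStorage layout are assumptions; only the call shapes TargetReg(reg, kNotWide|kWide|kRef) are taken from the diff itself.

// Minimal sketch, assuming a WideKind enum; toy types, not the ART sources.
#include <cassert>

enum WideKind {   // assumed name; the diff only confirms the enumerator spellings
  kNotWide,       // 32-bit view of the register
  kWide,          // 64-bit / register-pair view
  kRef,           // reference width: 32 or 64 bits depending on the target
};

struct RegStorage { int raw; };

struct ToyMir2Lir {
  bool target64;

  RegStorage TargetReg(int sym_reg, WideKind kind) const {
    // kRef picks the pointer width for the target, so callers no longer need
    // a distinct TargetRefReg() entry point or an opaque true/false flag.
    bool wide = (kind == kWide) || (kind == kRef && target64);
    return RegStorage{sym_reg | (wide ? 0x100 : 0)};
  }
};

int main() {
  ToyMir2Lir m2l{/*target64=*/true};
  assert(m2l.TargetReg(/*kArg0=*/0, kRef).raw == 0x100);      // 64-bit reference
  assert(m2l.TargetReg(/*kArg0=*/0, kNotWide).raw == 0x000);  // 32-bit scalar
  return 0;
}

The payoff shows at every call site in the diff: TargetReg(kArg1, kNotWide) documents its own width class, where TargetReg(kArg1, false) did not.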
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index c266a3c2e9..1fc0cff678 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -127,8 +127,8 @@ void Mir2Lir::GenArrayBoundsCheck(int index, RegStorage length) {
       m2l_->ResetDefTracking();
       GenerateTargetLabel(kPseudoThrowTarget);
 
-      RegStorage arg1_32 = m2l_->TargetReg(kArg1, false);
-      RegStorage arg0_32 = m2l_->TargetReg(kArg0, false);
+      RegStorage arg1_32 = m2l_->TargetReg(kArg1, kNotWide);
+      RegStorage arg0_32 = m2l_->TargetReg(kArg0, kNotWide);
 
       m2l_->OpRegCopy(arg1_32, length_);
       m2l_->LoadConstant(arg0_32, index_);
@@ -368,7 +368,8 @@ static void GenNewArrayImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu,
     if (!use_direct_type_ptr) {
       mir_to_lir->LoadClassType(type_idx, kArg0);
       func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocArrayResolved);
-      mir_to_lir->CallRuntimeHelperRegMethodRegLocation(func_offset, mir_to_lir->TargetReg(kArg0, false),
+      mir_to_lir->CallRuntimeHelperRegMethodRegLocation(func_offset,
+                                                        mir_to_lir->TargetReg(kArg0, kNotWide),
                                                         rl_src, true);
     } else {
       // Use the direct pointer.
@@ -431,8 +432,8 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
   } else {
     GenFilledNewArrayCall<4>(this, cu_, elems, type_idx);
   }
-  FreeTemp(TargetReg(kArg2, false));
-  FreeTemp(TargetReg(kArg1, false));
+  FreeTemp(TargetReg(kArg2, kNotWide));
+  FreeTemp(TargetReg(kArg1, kNotWide));
   /*
    * NOTE: the implicit target for Instruction::FILLED_NEW_ARRAY is the
    * return region. Because AllocFromCode placed the new array
@@ -440,7 +441,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
    * added, it may be necessary to additionally copy all return
    * values to a home location in thread-local storage
    */
-  RegStorage ref_reg = TargetRefReg(kRet0);
+  RegStorage ref_reg = TargetReg(kRet0, kRef);
   LockTemp(ref_reg);
 
   // TODO: use the correct component size, currently all supported types
@@ -477,7 +478,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
     switch (cu_->instruction_set) {
       case kThumb2:
       case kArm64:
-        r_val = TargetReg(kLr, false);
+        r_val = TargetReg(kLr, kNotWide);
         break;
       case kX86:
       case kX86_64:
@@ -553,7 +554,7 @@ class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
                                  storage_index_, true);
     }
     // Copy helper's result into r_base, a no-op on all but MIPS.
-    m2l_->OpRegCopy(r_base_, m2l_->TargetRefReg(kRet0));
+    m2l_->OpRegCopy(r_base_, m2l_->TargetReg(kRet0, kRef));
 
     m2l_->OpUnconditionalBranch(cont_);
   }
@@ -601,10 +602,10 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
       // May do runtime call so everything to home locations.
       FlushAllRegs();
       // Using fixed register to sync with possible call to runtime support.
-      RegStorage r_method = TargetRefReg(kArg1);
+      RegStorage r_method = TargetReg(kArg1, kRef);
       LockTemp(r_method);
       LoadCurrMethodDirect(r_method);
-      r_base = TargetRefReg(kArg0);
+      r_base = TargetReg(kArg0, kRef);
       LockTemp(r_base);
       LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                   r_base, kNotVolatile);
@@ -618,7 +619,7 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
         // The slow path is invoked if the r_base is NULL or the class pointed
         // to by it is not initialized.
         LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
-        RegStorage r_tmp = TargetReg(kArg2, false);
+        RegStorage r_tmp = TargetReg(kArg2, kNotWide);
         LockTemp(r_tmp);
         LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                                mirror::Class::StatusOffset().Int32Value(),
@@ -698,10 +699,10 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
       // May do runtime call so everything to home locations.
       FlushAllRegs();
       // Using fixed register to sync with possible call to runtime support.
-      RegStorage r_method = TargetRefReg(kArg1);
+      RegStorage r_method = TargetReg(kArg1, kRef);
       LockTemp(r_method);
      LoadCurrMethodDirect(r_method);
-      r_base = TargetRefReg(kArg0);
+      r_base = TargetReg(kArg0, kRef);
       LockTemp(r_base);
       LoadRefDisp(r_method, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                   r_base, kNotVolatile);
@@ -715,7 +716,7 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
         // The slow path is invoked if the r_base is NULL or the class pointed
         // to by it is not initialized.
         LIR* unresolved_branch = OpCmpImmBranch(kCondEq, r_base, 0, NULL);
-        RegStorage r_tmp = TargetReg(kArg2, false);
+        RegStorage r_tmp = TargetReg(kArg2, kNotWide);
         LockTemp(r_tmp);
         LIR* uninit_branch = OpCmpMemImmBranch(kCondLt, r_tmp, r_base,
                                                mirror::Class::StatusOffset().Int32Value(),
@@ -961,7 +962,7 @@ void Mir2Lir::GenConstClass(uint32_t type_idx, RegLocation rl_dest) {
         m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
                                       rl_method_.reg, true);
       }
-      m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetRefReg(kRet0));
+      m2l_->OpRegCopy(rl_result_.reg, m2l_->TargetReg(kRet0, kRef));
 
       m2l_->OpUnconditionalBranch(cont_);
     }
@@ -1001,15 +1002,15 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
       DCHECK(!IsTemp(rl_method.reg));
       r_method = rl_method.reg;
     } else {
-      r_method = TargetRefReg(kArg2);
+      r_method = TargetReg(kArg2, kRef);
       LoadCurrMethodDirect(r_method);
     }
     LoadRefDisp(r_method, mirror::ArtMethod::DexCacheStringsOffset().Int32Value(),
-                TargetRefReg(kArg0), kNotVolatile);
+                TargetReg(kArg0, kRef), kNotVolatile);
 
     // Might call out to helper, which will return resolved string in kRet0
-    LoadRefDisp(TargetRefReg(kArg0), offset_of_string, TargetRefReg(kRet0), kNotVolatile);
-    LIR* fromfast = OpCmpImmBranch(kCondEq, TargetRefReg(kRet0), 0, NULL);
+    LoadRefDisp(TargetReg(kArg0, kRef), offset_of_string, TargetReg(kRet0, kRef), kNotVolatile);
+    LIR* fromfast = OpCmpImmBranch(kCondEq, TargetReg(kRet0, kRef), 0, NULL);
     LIR* cont = NewLIR0(kPseudoTargetLabel);
 
     {
@@ -1078,10 +1079,12 @@ static void GenNewInstanceImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, uint32_
       mir_to_lir->LoadClassType(type_idx, kArg0);
       if (!is_type_initialized) {
         func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectResolved);
-        mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetRefReg(kArg0), true);
+        mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0, kRef),
+                                               true);
       } else {
         func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pAllocObjectInitialized);
-        mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetRefReg(kArg0), true);
+        mir_to_lir->CallRuntimeHelperRegMethod(func_offset, mir_to_lir->TargetReg(kArg0, kRef),
+                                               true);
       }
     } else {
       // Use the direct pointer.
@@ -1200,9 +1203,9 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
   FlushAllRegs();
   // May generate a call - use explicit registers
   LockCallTemps();
-  RegStorage method_reg = TargetRefReg(kArg1);
+  RegStorage method_reg = TargetReg(kArg1, kRef);
   LoadCurrMethodDirect(method_reg);   // kArg1 <= current Method*
-  RegStorage class_reg = TargetRefReg(kArg2);  // kArg2 will hold the Class*
+  RegStorage class_reg = TargetReg(kArg2, kRef);  // kArg2 will hold the Class*
   if (needs_access_check) {
     // Check we have access to type_idx and if not throw IllegalAccessError,
     // returns Class* in kArg0
@@ -1213,15 +1216,15 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
       CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                            type_idx, true);
     }
-    OpRegCopy(class_reg, TargetRefReg(kRet0));  // Align usage with fast path
-    LoadValueDirectFixed(rl_src, TargetRefReg(kArg0));  // kArg0 <= ref
+    OpRegCopy(class_reg, TargetReg(kRet0, kRef));  // Align usage with fast path
+    LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef));  // kArg0 <= ref
   } else if (use_declaring_class) {
-    LoadValueDirectFixed(rl_src, TargetRefReg(kArg0));  // kArg0 <= ref
+    LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef));  // kArg0 <= ref
     LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                 class_reg, kNotVolatile);
   } else {
     // Load dex cache entry into class_reg (kArg2)
-    LoadValueDirectFixed(rl_src, TargetRefReg(kArg0));  // kArg0 <= ref
+    LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef));  // kArg0 <= ref
     LoadRefDisp(method_reg, mirror::ArtMethod::DexCacheResolvedTypesOffset().Int32Value(),
                 class_reg, kNotVolatile);
     int32_t offset_of_type = ClassArray::OffsetOfElement(type_idx).Int32Value();
@@ -1236,8 +1239,8 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
     } else {
       CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx, true);
     }
-    OpRegCopy(TargetRefReg(kArg2), TargetRefReg(kRet0));  // Align usage with fast path
-    LoadValueDirectFixed(rl_src, TargetRefReg(kArg0));  /* reload Ref */
+    OpRegCopy(TargetReg(kArg2, kRef), TargetReg(kRet0, kRef));  // Align usage with fast path
+    LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef));  /* reload Ref */
     // Rejoin code paths
     LIR* hop_target = NewLIR0(kPseudoTargetLabel);
     hop_branch->target = hop_target;
@@ -1249,25 +1252,25 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
     // On MIPS rArg0 != rl_result, place false in result if branch is taken.
     LoadConstant(rl_result.reg, 0);
   }
-  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetRefReg(kArg0), 0, NULL);
+  LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0, kRef), 0, NULL);
 
   /* load object->klass_ */
   DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
-  LoadRefDisp(TargetRefReg(kArg0), mirror::Object::ClassOffset().Int32Value(), TargetRefReg(kArg1),
-              kNotVolatile);
+  LoadRefDisp(TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(),
+              TargetReg(kArg1, kRef), kNotVolatile);
   /* kArg0 is ref, kArg1 is ref->klass_, kArg2 is class */
   LIR* branchover = NULL;
   if (type_known_final) {
     // rl_result == ref == null == 0.
     if (cu_->instruction_set == kThumb2) {
-      OpRegReg(kOpCmp, TargetRefReg(kArg1), TargetRefReg(kArg2));  // Same?
+      OpRegReg(kOpCmp, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef));  // Same?
       LIR* it = OpIT(kCondEq, "E");   // if-convert the test
       LoadConstant(rl_result.reg, 1);     // .eq case - load true
       LoadConstant(rl_result.reg, 0);     // .ne case - load false
       OpEndIT(it);
     } else {
       LoadConstant(rl_result.reg, 0);     // ne case - load false
-      branchover = OpCmpBranch(kCondNe, TargetRefReg(kArg1), TargetRefReg(kArg2), NULL);
+      branchover = OpCmpBranch(kCondNe, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL);
       LoadConstant(rl_result.reg, 1);     // eq case - load true
     }
   } else {
@@ -1278,11 +1281,11 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
       LIR* it = nullptr;
       if (!type_known_abstract) {
         /* Uses conditional nullification */
-        OpRegReg(kOpCmp, TargetRefReg(kArg1), TargetRefReg(kArg2));  // Same?
+        OpRegReg(kOpCmp, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef));  // Same?
         it = OpIT(kCondEq, "EE");   // if-convert the test
-        LoadConstant(TargetReg(kArg0, false), 1);     // .eq case - load true
+        LoadConstant(TargetReg(kArg0, kNotWide), 1);     // .eq case - load true
       }
-      OpRegCopy(TargetRefReg(kArg0), TargetRefReg(kArg2));    // .ne case - arg0 <= class
+      OpRegCopy(TargetReg(kArg0, kRef), TargetReg(kArg2, kRef));    // .ne case - arg0 <= class
       OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
       if (it != nullptr) {
         OpEndIT(it);
@@ -1292,12 +1295,12 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
       if (!type_known_abstract) {
         /* Uses branchovers */
         LoadConstant(rl_result.reg, 1);     // assume true
-        branchover = OpCmpBranch(kCondEq, TargetRefReg(kArg1), TargetRefReg(kArg2), NULL);
+        branchover = OpCmpBranch(kCondEq, TargetReg(kArg1, kRef), TargetReg(kArg2, kRef), NULL);
       }
       RegStorage r_tgt = cu_->target64 ?
           LoadHelper(QUICK_ENTRYPOINT_OFFSET(8, pInstanceofNonTrivial)) :
           LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pInstanceofNonTrivial));
-      OpRegCopy(TargetRefReg(kArg0), TargetRefReg(kArg2));    // .ne case - arg0 <= class
+      OpRegCopy(TargetReg(kArg0, kRef), TargetReg(kArg2, kRef));    // .ne case - arg0 <= class
       OpReg(kOpBlx, r_tgt);    // .ne case: helper(class, ref->class)
       FreeTemp(r_tgt);
     }
@@ -1351,9 +1354,9 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
   FlushAllRegs();
   // May generate a call - use explicit registers
   LockCallTemps();
-  RegStorage method_reg = TargetRefReg(kArg1);
+  RegStorage method_reg = TargetReg(kArg1, kRef);
   LoadCurrMethodDirect(method_reg);  // kArg1 <= current Method*
-  RegStorage class_reg = TargetRefReg(kArg2);  // kArg2 will hold the Class*
+  RegStorage class_reg = TargetReg(kArg2, kRef);  // kArg2 will hold the Class*
   if (needs_access_check) {
     // Check we have access to type_idx and if not throw IllegalAccessError,
     // returns Class* in kRet0
@@ -1365,7 +1368,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
       CallRuntimeHelperImm(QUICK_ENTRYPOINT_OFFSET(4, pInitializeTypeAndVerifyAccess),
                            type_idx, true);
     }
-    OpRegCopy(class_reg, TargetRefReg(kRet0));  // Align usage with fast path
+    OpRegCopy(class_reg, TargetReg(kRet0, kRef));  // Align usage with fast path
   } else if (use_declaring_class) {
     LoadRefDisp(method_reg, mirror::ArtMethod::DeclaringClassOffset().Int32Value(),
                 class_reg, kNotVolatile);
@@ -1396,12 +1399,12 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
         // InitializeTypeFromCode(idx, method)
         if (m2l_->cu_->target64) {
           m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(8, pInitializeType), type_idx_,
-                                        m2l_->TargetRefReg(kArg1), true);
+                                        m2l_->TargetReg(kArg1, kRef), true);
         } else {
           m2l_->CallRuntimeHelperImmReg(QUICK_ENTRYPOINT_OFFSET(4, pInitializeType), type_idx_,
-                                        m2l_->TargetRefReg(kArg1), true);
+                                        m2l_->TargetReg(kArg1, kRef), true);
         }
-        m2l_->OpRegCopy(class_reg_, m2l_->TargetRefReg(kRet0));  // Align usage with fast path
+        m2l_->OpRegCopy(class_reg_, m2l_->TargetReg(kRet0, kRef));  // Align usage with fast path
 
        m2l_->OpUnconditionalBranch(cont_);
       }
@@ -1414,7 +1417,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
       }
     }
     // At this point, class_reg (kArg2) has class
-    LoadValueDirectFixed(rl_src, TargetRefReg(kArg0));  // kArg0 <= ref
+    LoadValueDirectFixed(rl_src, TargetReg(kArg0, kRef));  // kArg0 <= ref
 
     // Slow path for the case where the classes are not equal.  In this case we need
    // to call a helper function to do the check.
@@ -1428,15 +1431,17 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
       GenerateTargetLabel();
 
       if (load_) {
-        m2l_->LoadRefDisp(m2l_->TargetRefReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
-                          m2l_->TargetRefReg(kArg1), kNotVolatile);
+        m2l_->LoadRefDisp(m2l_->TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(),
+                          m2l_->TargetReg(kArg1, kRef), kNotVolatile);
       }
       if (m2l_->cu_->target64) {
-        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast), m2l_->TargetRefReg(kArg2),
-                                      m2l_->TargetRefReg(kArg1), true);
+        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(8, pCheckCast),
+                                      m2l_->TargetReg(kArg2, kRef), m2l_->TargetReg(kArg1, kRef),
+                                      true);
       } else {
-        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast), m2l_->TargetRefReg(kArg2),
-                                      m2l_->TargetRefReg(kArg1), true);
+        m2l_->CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pCheckCast),
+                                      m2l_->TargetReg(kArg2, kRef), m2l_->TargetReg(kArg1, kRef),
+                                      true);
       }
 
       m2l_->OpUnconditionalBranch(cont_);
@@ -1448,7 +1453,7 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
 
    if (type_known_abstract) {
       // Easier case, run slow path if target is non-null (slow path will load from target)
-      LIR* branch = OpCmpImmBranch(kCondNe, TargetRefReg(kArg0), 0, nullptr);
+      LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kArg0, kRef), 0, nullptr);
       LIR* cont = NewLIR0(kPseudoTargetLabel);
       AddSlowPath(new (arena_) SlowPath(this, branch, cont, true));
     } else {
@@ -1457,13 +1462,13 @@ void Mir2Lir::GenCheckCast(uint32_t insn_idx, uint32_t type_idx, RegLocation rl_
       // slow path if the classes are not equal.
 
       /* Null is OK - continue */
-      LIR* branch1 = OpCmpImmBranch(kCondEq, TargetRefReg(kArg0), 0, nullptr);
+      LIR* branch1 = OpCmpImmBranch(kCondEq, TargetReg(kArg0, kRef), 0, nullptr);
       /* load object->klass_ */
       DCHECK_EQ(mirror::Object::ClassOffset().Int32Value(), 0);
-      LoadRefDisp(TargetRefReg(kArg0), mirror::Object::ClassOffset().Int32Value(),
-                  TargetRefReg(kArg1), kNotVolatile);
+      LoadRefDisp(TargetReg(kArg0, kRef), mirror::Object::ClassOffset().Int32Value(),
+                  TargetReg(kArg1, kRef), kNotVolatile);
 
-      LIR* branch2 = OpCmpBranch(kCondNe, TargetRefReg(kArg1), class_reg, nullptr);
+      LIR* branch2 = OpCmpBranch(kCondNe, TargetReg(kArg1, kRef), class_reg, nullptr);
       LIR* cont = NewLIR0(kPseudoTargetLabel);
 
       // Add the slow path that will not perform load since this is already done.
@@ -1486,8 +1491,8 @@ void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_des
      * lr is used explicitly elsewhere in the code generator and cannot
      * normally be used as a general temp register.
      */
-    MarkTemp(TargetReg(kLr));   // Add lr to the temp pool
-    FreeTemp(TargetReg(kLr));   // and make it available
+    MarkTemp(TargetReg(kLr, kNotWide));   // Add lr to the temp pool
+    FreeTemp(TargetReg(kLr, kNotWide));   // and make it available
   }
   rl_src1 = LoadValueWide(rl_src1, kCoreReg);
   rl_src2 = LoadValueWide(rl_src2, kCoreReg);
@@ -1514,8 +1519,8 @@ void Mir2Lir::GenLong3Addr(OpKind first_op, OpKind second_op, RegLocation rl_des
     FreeRegLocTemps(rl_result, rl_src2);
   StoreValueWide(rl_dest, rl_result);
   if (cu_->instruction_set == kThumb2) {
-    Clobber(TargetReg(kLr));
-    UnmarkTemp(TargetReg(kLr));  // Remove lr from the temp pool
+    Clobber(TargetReg(kLr, kNotWide));
+    UnmarkTemp(TargetReg(kLr, kNotWide));  // Remove lr from the temp pool
   }
 }
@@ -1679,13 +1684,13 @@ void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
     // If we haven't already generated the code use the callout function.
     if (!done) {
      FlushAllRegs();   /* Send everything to home location */
-      LoadValueDirectFixed(rl_src2, TargetReg(kArg1, false));
+      LoadValueDirectFixed(rl_src2, TargetReg(kArg1, kNotWide));
       RegStorage r_tgt = cu_->target64 ?
          CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod)) :
          CallHelperSetup(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod));
-      LoadValueDirectFixed(rl_src1, TargetReg(kArg0, false));
+      LoadValueDirectFixed(rl_src1, TargetReg(kArg0, kNotWide));
       if (check_zero) {
-        GenDivZeroCheck(TargetReg(kArg1, false));
+        GenDivZeroCheck(TargetReg(kArg1, kNotWide));
       }
       // NOTE: callout here is not a safepoint.
       if (cu_->target64) {
@@ -1949,14 +1954,14 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re
 
     if (!done) {
       FlushAllRegs();   /* Everything to home location. */
-      LoadValueDirectFixed(rl_src, TargetReg(kArg0, false));
-      Clobber(TargetReg(kArg0, false));
+      LoadValueDirectFixed(rl_src, TargetReg(kArg0, kNotWide));
+      Clobber(TargetReg(kArg0, kNotWide));
       if (cu_->target64) {
-        CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), TargetReg(kArg0, false), lit,
-                                false);
+        CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(8, pIdivmod), TargetReg(kArg0, kNotWide),
+                                lit, false);
       } else {
-        CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), TargetReg(kArg0, false), lit,
-                                false);
+        CallRuntimeHelperRegImm(QUICK_ENTRYPOINT_OFFSET(4, pIdivmod), TargetReg(kArg0, kNotWide),
+                                lit, false);
       }
       if (is_div)
         rl_result = GetReturn(kCoreReg);
@@ -1989,7 +1994,7 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc
   bool call_out = false;
   bool check_zero = false;
   ThreadOffset<pointer_size> func_offset(-1);
-  int ret_reg = mir_to_lir->TargetReg(kRet0, false).GetReg();
+  int ret_reg = mir_to_lir->TargetReg(kRet0, kNotWide).GetReg();
 
   switch (opcode) {
     case Instruction::NOT_LONG:
@@ -2037,7 +2042,7 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc
         return;
       } else {
        call_out = true;
-        ret_reg = mir_to_lir->TargetReg(kRet0, false).GetReg();
+        ret_reg = mir_to_lir->TargetReg(kRet0, kNotWide).GetReg();
         func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmul);
       }
       break;
@@ -2049,7 +2054,7 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc
       }
      call_out = true;
      check_zero = true;
-      ret_reg = mir_to_lir->TargetReg(kRet0, false).GetReg();
+      ret_reg = mir_to_lir->TargetReg(kRet0, kNotWide).GetReg();
      func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLdiv);
      break;
     case Instruction::REM_LONG:
@@ -2062,8 +2067,8 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc
       check_zero = true;
       func_offset = QUICK_ENTRYPOINT_OFFSET(pointer_size, pLmod);
       /* NOTE - for Arm, result is in kArg2/kArg3 instead of kRet0/kRet1 */
-      ret_reg = (cu->instruction_set == kThumb2) ? mir_to_lir->TargetReg(kArg2, false).GetReg() :
-                mir_to_lir->TargetReg(kRet0, false).GetReg();
+      ret_reg = (cu->instruction_set == kThumb2) ? mir_to_lir->TargetReg(kArg2, kNotWide).GetReg() :
+                mir_to_lir->TargetReg(kRet0, kNotWide).GetReg();
       break;
     case Instruction::AND_LONG_2ADDR:
     case Instruction::AND_LONG:
@@ -2106,11 +2111,11 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc
   } else {
     mir_to_lir->FlushAllRegs();   /* Send everything to home location */
     if (check_zero) {
-      RegStorage r_tmp1 = mir_to_lir->TargetReg(kArg0, kArg1);
-      RegStorage r_tmp2 = mir_to_lir->TargetReg(kArg2, kArg3);
+      RegStorage r_tmp1 = mir_to_lir->TargetReg(kArg0, kWide);
+      RegStorage r_tmp2 = mir_to_lir->TargetReg(kArg2, kWide);
       mir_to_lir->LoadValueDirectWideFixed(rl_src2, r_tmp2);
       RegStorage r_tgt = mir_to_lir->CallHelperSetup(func_offset);
-      mir_to_lir->GenDivZeroCheckWide(mir_to_lir->TargetReg(kArg2, kArg3));
+      mir_to_lir->GenDivZeroCheckWide(r_tmp2);
      mir_to_lir->LoadValueDirectWideFixed(rl_src1, r_tmp1);
       // NOTE: callout here is not a safepoint
       mir_to_lir->CallHelper(r_tgt, func_offset, false /* not safepoint */);
@@ -2118,7 +2123,7 @@ static void GenArithOpLongImpl(Mir2Lir* mir_to_lir, CompilationUnit* cu, Instruc
       mir_to_lir->CallRuntimeHelperRegLocationRegLocation(func_offset, rl_src1, rl_src2, false);
     }
     // Adjust return regs in to handle case of rem returning kArg2/kArg3
-    if (ret_reg == mir_to_lir->TargetReg(kRet0, false).GetReg())
+    if (ret_reg == mir_to_lir->TargetReg(kRet0, kNotWide).GetReg())
       rl_result = mir_to_lir->GetReturnWide(kCoreReg);
     else
       rl_result = mir_to_lir->GetReturnWideAlt();
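The final GenArithOpLongImpl hunk makes a second, related cleanup: a wide value is no longer named by spelling out both halves as TargetReg(kArg0, kArg1), but requested as TargetReg(kArg0, kWide), and the already-built r_tmp2 is passed straight to GenDivZeroCheckWide() instead of re-deriving the same pair. A hedged sketch of why this matters -- toy types again, not the real Mir2Lir API; the pairing convention below is an assumption for illustration:

// Sketch only: with the two-register spelling, nothing stops a caller from
// pairing mismatched halves; with kWide, the second half is derived.
#include <cassert>

enum { kArg0 = 0, kArg1 = 1, kArg2 = 2, kArg3 = 3 };
enum WideKind { kNotWide, kWide, kRef };

struct RegStorage {
  int low;
  int high;  // -1 when the storage is a single 32-bit register
};

RegStorage TargetReg(int sym_reg, WideKind kind) {
  // Hypothetical pairing rule: a wide request pairs (reg, reg + 1).
  return kind == kWide ? RegStorage{sym_reg, sym_reg + 1}
                       : RegStorage{sym_reg, -1};
}

int main() {
  RegStorage r_tmp2 = TargetReg(kArg2, kWide);
  assert(r_tmp2.low == kArg2 && r_tmp2.high == kArg3);
  // Reusing r_tmp2 for the zero check, as the diff does, also avoids a
  // second TargetReg lookup for a pair that was just computed.
  return 0;
}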